ead5084cdf
Currently arm64 doesn't initialize the primary CRNG in a (potentially)
trusted manner as we only detect the presence of the RNG once secondary
CPUs are up.

Now that the core RNG code distinguishes the early initialization of the
primary CRNG, we can implement arch_get_random_seed_long_early() to
support this. This patch does so.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200210130015.17664-4-mark.rutland@arm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
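For context, the "early" seed hook implemented here overrides a weak generic fallback in the core RNG code, added earlier in the same series. The snippet below is a minimal sketch of that fallback, reconstructed from memory of the v5.6-era include/linux/random.h rather than taken from this page; the #define at the bottom of the arm64 header that follows is what signals to the generic code that the architecture supplies its own version.

/*
 * Sketch (not part of the file below): the generic fallback in
 * include/linux/random.h that arm64 overrides by defining the
 * arch_get_random_seed_long_early marker. Details may differ
 * from the real tree.
 */
#ifndef arch_get_random_seed_long_early
static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_seed_long(v);
}
#endif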
90 lines · 1.9 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#ifdef CONFIG_ARCH_RANDOM

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <asm/cpufeature.h>

static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __must_check arch_get_random_long(unsigned long *v)
{
	return false;
}

static inline bool __must_check arch_get_random_int(unsigned int *v)
{
	return false;
}

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (!cpus_have_const_cap(ARM64_HAS_RNG))
		return false;

	return __arm64_rndr(v);
}

static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
	unsigned long val;
	bool ok = arch_get_random_seed_long(&val);

	*v = val;
	return ok;
}

static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
}

static inline bool __init __must_check
arch_get_random_seed_long_early(unsigned long *v)
{
	WARN_ON(system_state != SYSTEM_BOOTING);

	if (!__early_cpu_has_rndr())
		return false;

	return __arm64_rndr(v);
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early

#else

static inline bool __arm64_rndr(unsigned long *v) { return false; }
static inline bool __init __early_cpu_has_rndr(void) { return false; }

#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
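For reference, the boot-time consumer of these hooks lives in drivers/char/random.c. The sketch below is reconstructed from memory of the v5.6-era crng_init_try_arch_early() and is illustrative only: the primary CRNG is seeded while system_state == SYSTEM_BOOTING, so it must use the *_early() variants above rather than arch_get_random_seed_long(), whose cpufeature check is not yet meaningful at that point.

/*
 * Illustrative sketch of the early-boot caller (v5.6-era
 * drivers/char/random.c); details may differ from the real code.
 */
static bool __init crng_init_try_arch_early(struct crng_state *crng)
{
	int i;
	bool arch_init = true;
	unsigned long rv;

	for (i = 4; i < 16; i++) {
		/* Prefer a trusted seed; fall back to a plain arch RNG word. */
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();	/* timer/counter fallback */
			arch_init = false;
		}
		crng->state[i] ^= rv;
	}

	/* Only if every word came from the arch RNG can the CRNG be credited. */
	return arch_init;
}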