Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-24 12:29:44 +07:00
2e8e1ea88c
When seeding KASLR on a system where we have architecture-level random number generation, make use of that entropy, mixing it in with the seed passed by the bootloader. Since this is run very early in init, before feature detection is complete, we open-code the access rather than use archrandom.h.

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
76 lines · 1.6 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#ifdef CONFIG_ARCH_RANDOM

#include <linux/random.h>
#include <asm/cpufeature.h>

static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __must_check arch_get_random_long(unsigned long *v)
{
	return false;
}

static inline bool __must_check arch_get_random_int(unsigned int *v)
{
	return false;
}

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (!cpus_have_const_cap(ARM64_HAS_RNG))
		return false;

	return __arm64_rndr(v);
}

static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
	unsigned long val;
	bool ok = arch_get_random_seed_long(&val);

	*v = val;
	return ok;
}

static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
}

#else

static inline bool __arm64_rndr(unsigned long *v) { return false; }
static inline bool __init __early_cpu_has_rndr(void) { return false; }

#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
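The commit message above describes the intended consumer of these helpers: when seeding KASLR, entropy read from RNDR is mixed into the seed passed by the bootloader, and because this happens before cpufeature detection has run, the early, open-coded helpers are used instead of the generic arch_get_random_*() interface. Below is a minimal sketch of that pattern; the function name mix_rndr_into_seed() is illustrative only and is not the kernel's actual KASLR code.

#include <asm/archrandom.h>

/*
 * Illustrative sketch only: fold architectural entropy into an
 * early boot seed. The function name is hypothetical; the real
 * logic lives in the arm64 KASLR early-init path.
 */
static u64 __init mix_rndr_into_seed(u64 seed)
{
	unsigned long raw;

	/*
	 * cpufeature detection has not run yet, so use the
	 * open-coded ID-register check rather than
	 * cpus_have_const_cap(ARM64_HAS_RNG).
	 */
	if (__early_cpu_has_rndr() && __arm64_rndr(&raw))
		seed ^= raw;	/* mix RNDR output with the bootloader seed */

	return seed;
}

Note that when CONFIG_ARCH_RANDOM is disabled, the #else branch of the header stubs both helpers out to return false, so a caller written this way simply keeps the bootloader-provided seed unchanged.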