mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-30 09:56:40 +07:00
fcce810986
The current "no hash" MMU context management code is written with the assumption that one CPU == one TLB. This is not the case on implementations that support HW multithreading, where several Linux CPUs can share the same TLB. This adds some basic support for this to our context management and our TLB flushing code. It also cleans up the optional debugging output a bit.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
88 lines
2.1 KiB
C
#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H

#include <linux/cpumask.h>

/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power of 2 number of
 * threads per core and the same number for each core in the system
 * (though it would work if some processors had fewer threads as long
 * as the CPU numbers are still allocated, just not brought offline).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */

#ifdef CONFIG_SMP
extern int threads_per_core;
extern int threads_shift;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core	1
#define threads_shift		0
#define threads_core_mask	(CPU_MASK_CPU0)
#endif

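/*
 * Example (illustrative, assuming a hypothetical 4-thread-per-core
 * system; these values are set up by platform code, not here):
 *
 *	threads_per_core  = 4
 *	threads_shift     = 2	(threads_per_core == 1 << threads_shift)
 *	threads_core_mask = mask with bits 0-3 set (the threads of core 0)
 */
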
/* cpu_thread_mask_to_cores - Return a cpumask with one bit per core
 *                            hit by the argument
 *
 * @threads: a cpumask of threads
 *
 * This function returns a cpumask which will have one "cpu" (or thread)
 * bit set for each core that has at least one thread set in the argument.
 *
 * This can typically be used for things like IPIs for TLB invalidations,
 * since those need to be done only once per core/TLB.
 */
static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
{
        cpumask_t tmp, res;
        int i;

        res = CPU_MASK_NONE;
        for (i = 0; i < NR_CPUS; i += threads_per_core) {
                cpus_shift_left(tmp, threads_core_mask, i);
                if (cpus_intersects(threads, tmp))
                        cpu_set(i, res);
        }
        return res;
}

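/*
 * Usage sketch (illustrative only): sending a TLB-invalidation IPI once
 * per core rather than once per hardware thread. "flush_core_tlb_ipi"
 * is a hypothetical handler, not an existing kernel function.
 *
 *	cpumask_t cores = cpu_thread_mask_to_cores(cpu_online_map);
 *	int cpu;
 *
 *	for_each_cpu_mask(cpu, cores)
 *		smp_call_function_single(cpu, flush_core_tlb_ipi, NULL, 1);
 */
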
static inline int cpu_nr_cores(void)
{
        return NR_CPUS >> threads_shift;
}

static inline cpumask_t cpu_online_cores_map(void)
{
        return cpu_thread_mask_to_cores(cpu_online_map);
}

static inline int cpu_thread_to_core(int cpu)
{
        return cpu >> threads_shift;
}

static inline int cpu_thread_in_core(int cpu)
{
        return cpu & (threads_per_core - 1);
}

static inline int cpu_first_thread_in_core(int cpu)
{
        return cpu & ~(threads_per_core - 1);
}

static inline int cpu_last_thread_in_core(int cpu)
{
        return cpu | (threads_per_core - 1);
}

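/*
 * Worked example (illustrative, assuming threads_per_core = 4 and thus
 * threads_shift = 2), for cpu = 6:
 *
 *	cpu_thread_to_core(6)       = 6 >> 2  = 1
 *	cpu_thread_in_core(6)       = 6 & 3   = 2
 *	cpu_first_thread_in_core(6) = 6 & ~3  = 4
 *	cpu_last_thread_in_core(6)  = 6 | 3   = 7
 */
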
#endif /* _ASM_POWERPC_CPUTHREADS_H */