2009-09-28 02:55:43 +07:00
|
|
|
/*
|
|
|
|
* ARM specific SMP header, this contains our implementation
|
|
|
|
* details.
|
|
|
|
*/
|
|
|
|
#ifndef __ASMARM_SMP_PLAT_H
|
|
|
|
#define __ASMARM_SMP_PLAT_H
|
|
|
|
|
2011-11-18 00:36:24 +07:00
|
|
|
#include <linux/cpumask.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
|
2014-08-15 21:53:14 +07:00
|
|
|
#include <asm/cpu.h>
|
2009-09-28 02:55:43 +07:00
|
|
|
#include <asm/cputype.h>
|
|
|
|
|
2010-09-04 16:47:48 +07:00
|
|
|
/*
 * Return true if we are running on a SMP platform
 */
static inline bool is_smp(void)
{
#if !defined(CONFIG_SMP)
	/* UP-only kernel: never running SMP. */
	return false;
#elif defined(CONFIG_SMP_ON_UP)
	/*
	 * SMP kernel that can also boot on UP hardware: the decision is
	 * made at runtime and recorded in smp_on_up during boot patching.
	 */
	extern unsigned int smp_on_up;
	return smp_on_up != 0;
#else
	/* SMP kernel without UP support: always SMP. */
	return true;
#endif
}
|
|
|
|
|
2014-08-15 21:53:14 +07:00
|
|
|
/**
|
|
|
|
* smp_cpuid_part() - return part id for a given cpu
|
|
|
|
* @cpu: logical cpu id.
|
|
|
|
*
|
|
|
|
* Return: part id of logical cpu passed as argument.
|
|
|
|
*/
|
|
|
|
static inline unsigned int smp_cpuid_part(int cpu)
|
|
|
|
{
|
|
|
|
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
|
|
|
|
|
|
|
|
return is_smp() ? cpu_info->cpuid & ARM_CPU_PART_MASK :
|
|
|
|
read_cpuid_part();
|
|
|
|
}
|
|
|
|
|
2009-09-28 02:55:43 +07:00
|
|
|
/* all SMP configurations have the extended CPUID registers */
#ifndef CONFIG_MMU
/* Without an MMU there are no TLBs, so there is never anything to broadcast. */
#define tlb_ops_need_broadcast()	0
#else
/*
 * Return non-zero if TLB maintenance operations performed on this CPU
 * are not broadcast to the other cores in hardware, i.e. the kernel
 * must broadcast them itself (via IPI).
 */
static inline int tlb_ops_need_broadcast(void)
{
	/* On UP hardware there are no other cores to broadcast to. */
	if (!is_smp())
		return 0;

	/*
	 * Extract ID_MMFR3[15:12]; NOTE(review): per the ARM ARM this is the
	 * "maintenance broadcast" field, where values >= 2 indicate that TLB
	 * maintenance is broadcast in hardware — confirm against the manual.
	 */
	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
#endif
|
2009-09-28 02:55:43 +07:00
|
|
|
|
2010-09-13 21:58:37 +07:00
|
|
|
/*
 * ARMv7+ (and UP kernels) never need software broadcast of cache
 * maintenance, so compile the check away entirely in those cases.
 */
#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
#define cache_ops_need_broadcast()	0
#else
/*
 * Return non-zero if cache maintenance operations performed on this CPU
 * are not broadcast to the other cores in hardware, i.e. the kernel
 * must broadcast them itself.
 */
static inline int cache_ops_need_broadcast(void)
{
	/* On UP hardware there are no other cores to broadcast to. */
	if (!is_smp())
		return 0;

	/*
	 * Extract ID_MMFR3[15:12]; NOTE(review): per the ARM ARM this is the
	 * "maintenance broadcast" field — any non-zero value means cache
	 * maintenance is broadcast in hardware (TLB maintenance requires >= 2,
	 * see tlb_ops_need_broadcast()) — confirm against the manual.
	 */
	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
}
#endif
|
2009-11-05 20:29:36 +07:00
|
|
|
|
2012-01-20 18:01:12 +07:00
|
|
|
/*
|
|
|
|
* Logical CPU mapping.
|
|
|
|
*/
|
2013-06-19 16:40:48 +07:00
|
|
|
extern u32 __cpu_logical_map[];
|
2012-01-20 18:01:12 +07:00
|
|
|
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
|
2011-11-18 00:36:24 +07:00
|
|
|
/*
|
|
|
|
* Retrieve logical cpu index corresponding to a given MPIDR[23:0]
|
|
|
|
* - mpidr: MPIDR[23:0] to be used for the look-up
|
|
|
|
*
|
|
|
|
* Returns the cpu logical index or -EINVAL on look-up error
|
|
|
|
*/
|
|
|
|
static inline int get_logical_index(u32 mpidr)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
|
|
|
|
if (cpu_logical_map(cpu) == mpidr)
|
|
|
|
return cpu;
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2012-01-20 18:01:12 +07:00
|
|
|
|
2013-05-16 16:34:30 +07:00
|
|
|
/*
 * NOTE ! Assembly code relies on the following
 * structure memory layout in order to carry out load
 * multiple from its base address. For more
 * information check arch/arm/kernel/sleep.S
 *
 * Do NOT reorder, resize or insert members: the field order and offsets
 * below are part of the contract with the assembly resume path.
 */
struct mpidr_hash {
	u32	mask; /* used by sleep.S */
	u32	shift_aff[3]; /* used by sleep.S */
	/*
	 * Width of the resulting hash; the table has 1 << bits buckets
	 * (see mpidr_hash_size()).
	 */
	u32	bits;
};
|
|
|
|
|
|
|
|
extern struct mpidr_hash mpidr_hash;
|
|
|
|
|
|
|
|
static inline u32 mpidr_hash_size(void)
|
|
|
|
{
|
|
|
|
return 1 << mpidr_hash.bits;
|
|
|
|
}
|
2013-08-03 02:52:49 +07:00
|
|
|
|
2015-04-01 19:36:57 +07:00
|
|
|
extern int platform_can_secondary_boot(void);
|
2013-08-03 02:52:49 +07:00
|
|
|
extern int platform_can_cpu_hotplug(void);
|
|
|
|
|
2015-07-29 06:34:48 +07:00
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
/* Ask the platform whether the given cpu may be hot-unplugged. */
extern int platform_can_hotplug_cpu(unsigned int cpu);
#else
/* Without CONFIG_HOTPLUG_CPU no cpu can ever be unplugged. */
static inline int platform_can_hotplug_cpu(unsigned int cpu)
{
	return 0;
}
#endif
|
|
|
|
|
2009-09-28 02:55:43 +07:00
|
|
|
#endif
|