2005-04-17 05:20:36 +07:00
|
|
|
/*
|
2013-10-12 06:29:46 +07:00
|
|
|
* Routines to identify caches on Intel CPU.
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
2008-07-28 21:20:08 +07:00
|
|
|
* Changes:
|
|
|
|
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
|
2009-07-04 06:35:45 +07:00
|
|
|
* Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
|
2007-07-21 22:10:03 +07:00
|
|
|
* Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
#include <linux/cpu.h>
|
2005-10-31 06:03:48 +07:00
|
|
|
#include <linux/sched.h>
|
2008-07-23 01:06:02 +07:00
|
|
|
#include <linux/pci.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#include <asm/processor.h>
|
2009-07-04 06:35:45 +07:00
|
|
|
#include <linux/smp.h>
|
2010-09-17 23:03:43 +07:00
|
|
|
#include <asm/amd_nb.h>
|
2010-01-22 22:01:05 +07:00
|
|
|
#include <asm/smp.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#define LVL_1_INST 1
|
|
|
|
#define LVL_1_DATA 2
|
|
|
|
#define LVL_2 3
|
|
|
|
#define LVL_3 4
|
|
|
|
#define LVL_TRACE 5
|
|
|
|
|
2009-07-04 06:35:45 +07:00
|
|
|
struct _cache_table {
|
2005-04-17 05:20:36 +07:00
|
|
|
unsigned char descriptor;
|
|
|
|
char cache_type;
|
|
|
|
short size;
|
|
|
|
};
|
|
|
|
|
2010-01-04 21:47:35 +07:00
|
|
|
#define MB(x) ((x) * 1024)
|
|
|
|
|
2009-07-04 06:35:45 +07:00
|
|
|
/* All the cache descriptor types we care about (no TLB or
|
|
|
|
trace cache entries) */
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static const struct _cache_table cache_table[] =
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
|
|
|
|
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
|
2009-02-01 08:12:14 +07:00
|
|
|
{ 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
|
2005-04-17 05:20:36 +07:00
|
|
|
{ 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
|
|
|
|
{ 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
|
2009-02-01 08:12:14 +07:00
|
|
|
{ 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
|
2011-01-20 08:20:56 +07:00
|
|
|
{ 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
|
2009-02-01 08:12:14 +07:00
|
|
|
{ 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
|
2005-04-17 05:20:36 +07:00
|
|
|
{ 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
|
2010-01-04 21:47:35 +07:00
|
|
|
{ 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
2005-04-17 05:20:36 +07:00
|
|
|
{ 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
|
|
|
|
{ 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
|
|
|
|
{ 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
|
2006-01-27 13:40:40 +07:00
|
|
|
{ 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
|
2005-04-17 05:20:36 +07:00
|
|
|
{ 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
|
2006-01-27 13:40:40 +07:00
|
|
|
{ 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
|
2007-12-21 07:27:19 +07:00
|
|
|
{ 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
|
2005-04-17 05:20:36 +07:00
|
|
|
{ 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
|
|
|
|
{ 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
|
|
|
|
{ 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
|
2010-01-04 21:47:35 +07:00
|
|
|
{ 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
|
|
|
|
{ 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
|
|
|
|
{ 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
|
|
|
|
{ 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
|
2011-01-20 08:20:56 +07:00
|
|
|
{ 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
|
2010-01-04 21:47:35 +07:00
|
|
|
{ 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
|
|
|
|
{ 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
|
|
|
|
{ 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
|
|
|
|
{ 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
|
|
|
|
{ 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
|
|
|
|
{ 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
|
2005-04-17 05:20:36 +07:00
|
|
|
{ 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
|
|
|
|
{ 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
|
|
|
|
{ 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
|
2006-01-27 13:40:40 +07:00
|
|
|
{ 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
|
2010-01-04 21:47:35 +07:00
|
|
|
{ 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
|
|
|
|
{ 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
|
|
|
|
{ 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
|
|
|
|
{ 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
|
2011-01-20 08:20:56 +07:00
|
|
|
{ 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
|
2010-01-04 21:47:35 +07:00
|
|
|
{ 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
|
|
|
|
{ 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
|
|
|
|
{ 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
|
|
|
|
{ 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
|
|
|
|
{ 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
|
|
|
|
{ 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
|
|
|
|
{ 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
|
|
|
|
{ 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
|
|
|
|
{ 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
|
|
|
|
{ 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
|
|
|
|
{ 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
|
|
|
|
{ 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
|
|
|
|
{ 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
|
|
|
|
{ 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
|
|
|
|
{ 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
|
|
|
|
{ 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
|
|
|
|
{ 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
|
|
|
|
{ 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
|
|
|
|
{ 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
|
|
|
|
{ 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
|
|
|
|
{ 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
|
2005-04-17 05:20:36 +07:00
|
|
|
{ 0x00, 0, 0}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-07-04 06:35:45 +07:00
|
|
|
enum _cache_type {
|
2005-04-17 05:20:36 +07:00
|
|
|
CACHE_TYPE_NULL = 0,
|
|
|
|
CACHE_TYPE_DATA = 1,
|
|
|
|
CACHE_TYPE_INST = 2,
|
|
|
|
CACHE_TYPE_UNIFIED = 3
|
|
|
|
};
|
|
|
|
|
|
|
|
union _cpuid4_leaf_eax {
|
|
|
|
struct {
|
|
|
|
enum _cache_type type:5;
|
|
|
|
unsigned int level:3;
|
|
|
|
unsigned int is_self_initializing:1;
|
|
|
|
unsigned int is_fully_associative:1;
|
|
|
|
unsigned int reserved:4;
|
|
|
|
unsigned int num_threads_sharing:12;
|
|
|
|
unsigned int num_cores_on_die:6;
|
|
|
|
} split;
|
|
|
|
u32 full;
|
|
|
|
};
|
|
|
|
|
|
|
|
union _cpuid4_leaf_ebx {
|
|
|
|
struct {
|
|
|
|
unsigned int coherency_line_size:12;
|
|
|
|
unsigned int physical_line_partition:10;
|
|
|
|
unsigned int ways_of_associativity:10;
|
|
|
|
} split;
|
|
|
|
u32 full;
|
|
|
|
};
|
|
|
|
|
|
|
|
union _cpuid4_leaf_ecx {
|
|
|
|
struct {
|
|
|
|
unsigned int number_of_sets:32;
|
|
|
|
} split;
|
|
|
|
u32 full;
|
|
|
|
};
|
|
|
|
|
2011-07-24 16:46:08 +07:00
|
|
|
struct _cpuid4_info_regs {
|
2005-04-17 05:20:36 +07:00
|
|
|
union _cpuid4_leaf_eax eax;
|
|
|
|
union _cpuid4_leaf_ebx ebx;
|
|
|
|
union _cpuid4_leaf_ecx ecx;
|
|
|
|
unsigned long size;
|
2011-07-24 16:46:09 +07:00
|
|
|
struct amd_northbridge *nb;
|
2009-01-11 12:58:10 +07:00
|
|
|
};
|
|
|
|
|
2011-07-24 16:46:08 +07:00
|
|
|
struct _cpuid4_info {
|
|
|
|
struct _cpuid4_info_regs base;
|
|
|
|
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
|
2005-04-17 05:20:36 +07:00
|
|
|
};
|
|
|
|
|
2006-06-26 18:56:13 +07:00
|
|
|
unsigned short num_cache_leaves;
|
|
|
|
|
|
|
|
/* AMD doesn't have CPUID4. Emulate it here to report the same
|
|
|
|
information to the user. This makes some assumptions about the machine:
|
2007-07-21 22:10:03 +07:00
|
|
|
L2 not shared, no SMT etc. that is currently true on AMD CPUs.
|
2006-06-26 18:56:13 +07:00
|
|
|
|
|
|
|
In theory the TLBs could be reported as fake type (they are in "dummy").
|
|
|
|
Maybe later */
|
|
|
|
union l1_cache {
|
|
|
|
struct {
|
2009-07-04 06:35:45 +07:00
|
|
|
unsigned line_size:8;
|
|
|
|
unsigned lines_per_tag:8;
|
|
|
|
unsigned assoc:8;
|
|
|
|
unsigned size_in_kb:8;
|
2006-06-26 18:56:13 +07:00
|
|
|
};
|
|
|
|
unsigned val;
|
|
|
|
};
|
|
|
|
|
|
|
|
union l2_cache {
|
|
|
|
struct {
|
2009-07-04 06:35:45 +07:00
|
|
|
unsigned line_size:8;
|
|
|
|
unsigned lines_per_tag:4;
|
|
|
|
unsigned assoc:4;
|
|
|
|
unsigned size_in_kb:16;
|
2006-06-26 18:56:13 +07:00
|
|
|
};
|
|
|
|
unsigned val;
|
|
|
|
};
|
|
|
|
|
2007-07-21 22:10:03 +07:00
|
|
|
union l3_cache {
|
|
|
|
struct {
|
2009-07-04 06:35:45 +07:00
|
|
|
unsigned line_size:8;
|
|
|
|
unsigned lines_per_tag:4;
|
|
|
|
unsigned assoc:4;
|
|
|
|
unsigned res:2;
|
|
|
|
unsigned size_encoded:14;
|
2007-07-21 22:10:03 +07:00
|
|
|
};
|
|
|
|
unsigned val;
|
|
|
|
};
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static const unsigned short assocs[] = {
|
2009-04-09 20:47:10 +07:00
|
|
|
[1] = 1,
|
|
|
|
[2] = 2,
|
|
|
|
[4] = 4,
|
|
|
|
[6] = 8,
|
|
|
|
[8] = 16,
|
|
|
|
[0xa] = 32,
|
|
|
|
[0xb] = 48,
|
2007-07-21 22:10:03 +07:00
|
|
|
[0xc] = 64,
|
2009-04-09 20:47:10 +07:00
|
|
|
[0xd] = 96,
|
|
|
|
[0xe] = 128,
|
|
|
|
[0xf] = 0xffff /* fully associative - no way to show this currently */
|
2007-07-21 22:10:03 +07:00
|
|
|
};
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static const unsigned char levels[] = { 1, 1, 2, 3 };
|
|
|
|
static const unsigned char types[] = { 1, 2, 3, 3 };
|
2006-06-26 18:56:13 +07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static void
|
2008-07-28 21:20:08 +07:00
|
|
|
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
|
|
|
|
union _cpuid4_leaf_ebx *ebx,
|
|
|
|
union _cpuid4_leaf_ecx *ecx)
|
2006-06-26 18:56:13 +07:00
|
|
|
{
|
|
|
|
unsigned dummy;
|
|
|
|
unsigned line_size, lines_per_tag, assoc, size_in_kb;
|
|
|
|
union l1_cache l1i, l1d;
|
|
|
|
union l2_cache l2;
|
2007-07-21 22:10:03 +07:00
|
|
|
union l3_cache l3;
|
|
|
|
union l1_cache *l1 = &l1d;
|
2006-06-26 18:56:13 +07:00
|
|
|
|
|
|
|
eax->full = 0;
|
|
|
|
ebx->full = 0;
|
|
|
|
ecx->full = 0;
|
|
|
|
|
|
|
|
cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
|
2007-07-21 22:10:03 +07:00
|
|
|
cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
|
2006-06-26 18:56:13 +07:00
|
|
|
|
2007-07-21 22:10:03 +07:00
|
|
|
switch (leaf) {
|
|
|
|
case 1:
|
|
|
|
l1 = &l1i;
|
|
|
|
case 0:
|
|
|
|
if (!l1->val)
|
|
|
|
return;
|
2009-09-03 14:41:19 +07:00
|
|
|
assoc = assocs[l1->assoc];
|
2006-06-26 18:56:13 +07:00
|
|
|
line_size = l1->line_size;
|
|
|
|
lines_per_tag = l1->lines_per_tag;
|
|
|
|
size_in_kb = l1->size_in_kb;
|
2007-07-21 22:10:03 +07:00
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
if (!l2.val)
|
|
|
|
return;
|
2009-09-03 14:41:19 +07:00
|
|
|
assoc = assocs[l2.assoc];
|
2006-06-26 18:56:13 +07:00
|
|
|
line_size = l2.line_size;
|
|
|
|
lines_per_tag = l2.lines_per_tag;
|
|
|
|
/* cpu_data has errata corrections for K7 applied */
|
2010-12-18 22:30:05 +07:00
|
|
|
size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
|
2007-07-21 22:10:03 +07:00
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
if (!l3.val)
|
|
|
|
return;
|
2009-09-03 14:41:19 +07:00
|
|
|
assoc = assocs[l3.assoc];
|
2007-07-21 22:10:03 +07:00
|
|
|
line_size = l3.line_size;
|
|
|
|
lines_per_tag = l3.lines_per_tag;
|
|
|
|
size_in_kb = l3.size_encoded * 512;
|
2009-09-03 14:41:19 +07:00
|
|
|
if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
|
|
|
|
size_in_kb = size_in_kb >> 1;
|
|
|
|
assoc = assoc >> 1;
|
|
|
|
}
|
2007-07-21 22:10:03 +07:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return;
|
2006-06-26 18:56:13 +07:00
|
|
|
}
|
|
|
|
|
2007-07-21 22:10:03 +07:00
|
|
|
eax->split.is_self_initializing = 1;
|
|
|
|
eax->split.type = types[leaf];
|
|
|
|
eax->split.level = levels[leaf];
|
2009-09-03 14:41:19 +07:00
|
|
|
eax->split.num_threads_sharing = 0;
|
2010-12-18 22:30:05 +07:00
|
|
|
eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
|
2007-07-21 22:10:03 +07:00
|
|
|
|
|
|
|
|
2009-09-03 14:41:19 +07:00
|
|
|
if (assoc == 0xffff)
|
2006-06-26 18:56:13 +07:00
|
|
|
eax->split.is_fully_associative = 1;
|
|
|
|
ebx->split.coherency_line_size = line_size - 1;
|
2009-09-03 14:41:19 +07:00
|
|
|
ebx->split.ways_of_associativity = assoc - 1;
|
2006-06-26 18:56:13 +07:00
|
|
|
ebx->split.physical_line_partition = lines_per_tag - 1;
|
|
|
|
ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
|
|
|
|
(ebx->split.ways_of_associativity + 1) - 1;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2010-02-19 01:37:14 +07:00
|
|
|
struct _cache_attr {
|
|
|
|
struct attribute attr;
|
2011-02-08 00:10:39 +07:00
|
|
|
ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
|
|
|
|
ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
|
|
|
|
unsigned int);
|
2010-02-19 01:37:14 +07:00
|
|
|
};
|
|
|
|
|
2013-02-04 16:13:15 +07:00
|
|
|
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
|
2010-04-22 21:07:01 +07:00
|
|
|
/*
|
|
|
|
* L3 cache descriptors
|
|
|
|
*/
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static void amd_calc_l3_indices(struct amd_northbridge *nb)
|
2010-01-22 22:01:07 +07:00
|
|
|
{
|
2011-07-24 16:46:09 +07:00
|
|
|
struct amd_l3_cache *l3 = &nb->l3_cache;
|
2010-01-22 22:01:07 +07:00
|
|
|
unsigned int sc0, sc1, sc2, sc3;
|
2010-02-19 01:37:14 +07:00
|
|
|
u32 val = 0;
|
2010-01-22 22:01:07 +07:00
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
pci_read_config_dword(nb->misc, 0x1C4, &val);
|
2010-01-22 22:01:07 +07:00
|
|
|
|
|
|
|
/* calculate subcache sizes */
|
2010-04-22 21:07:00 +07:00
|
|
|
l3->subcaches[0] = sc0 = !(val & BIT(0));
|
|
|
|
l3->subcaches[1] = sc1 = !(val & BIT(4));
|
2011-05-18 16:32:10 +07:00
|
|
|
|
|
|
|
if (boot_cpu_data.x86 == 0x15) {
|
|
|
|
l3->subcaches[0] = sc0 += !(val & BIT(1));
|
|
|
|
l3->subcaches[1] = sc1 += !(val & BIT(5));
|
|
|
|
}
|
|
|
|
|
2010-04-22 21:07:00 +07:00
|
|
|
l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
|
|
|
|
l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
|
|
|
|
|
2010-10-27 04:22:23 +07:00
|
|
|
l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
|
2010-04-22 21:07:01 +07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
|
2008-07-19 04:03:52 +07:00
|
|
|
{
|
2010-04-22 21:07:01 +07:00
|
|
|
int node;
|
|
|
|
|
2010-10-29 22:14:32 +07:00
|
|
|
/* only for L3, and not in virtualized environments */
|
2011-07-24 16:46:09 +07:00
|
|
|
if (index < 3)
|
2010-04-22 21:06:59 +07:00
|
|
|
return;
|
|
|
|
|
2010-04-22 21:07:01 +07:00
|
|
|
node = amd_get_nb_id(smp_processor_id());
|
2011-07-24 16:46:09 +07:00
|
|
|
this_leaf->nb = node_to_amd_nb(node);
|
|
|
|
if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
|
|
|
|
amd_calc_l3_indices(this_leaf->nb);
|
2008-07-19 04:03:52 +07:00
|
|
|
}
|
|
|
|
|
2010-06-02 23:18:40 +07:00
|
|
|
/*
|
|
|
|
* check whether a slot used for disabling an L3 index is occupied.
|
|
|
|
* @l3: L3 cache descriptor
|
|
|
|
* @slot: slot number (0..1)
|
|
|
|
*
|
|
|
|
* @returns: the disabled index if used or negative value if slot free.
|
|
|
|
*/
|
2011-07-24 16:46:09 +07:00
|
|
|
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
|
2010-06-02 23:18:40 +07:00
|
|
|
{
|
|
|
|
unsigned int reg = 0;
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
pci_read_config_dword(nb->misc, 0x1BC + slot * 4, ®);
|
2010-06-02 23:18:40 +07:00
|
|
|
|
|
|
|
/* check whether this slot is activated already */
|
|
|
|
if (reg & (3UL << 30))
|
|
|
|
return reg & 0xfff;
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2010-02-19 01:37:14 +07:00
|
|
|
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
|
2010-04-22 21:07:02 +07:00
|
|
|
unsigned int slot)
|
2010-02-19 01:37:14 +07:00
|
|
|
{
|
2010-06-02 23:18:40 +07:00
|
|
|
int index;
|
2010-02-19 01:37:14 +07:00
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
|
2010-02-19 01:37:14 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
|
2010-06-02 23:18:40 +07:00
|
|
|
if (index >= 0)
|
|
|
|
return sprintf(buf, "%d\n", index);
|
2010-02-19 01:37:14 +07:00
|
|
|
|
2010-06-02 23:18:40 +07:00
|
|
|
return sprintf(buf, "FREE\n");
|
2010-02-19 01:37:14 +07:00
|
|
|
}
|
|
|
|
|
2010-04-22 21:07:02 +07:00
|
|
|
#define SHOW_CACHE_DISABLE(slot) \
|
2010-02-19 01:37:14 +07:00
|
|
|
static ssize_t \
|
2011-02-08 00:10:39 +07:00
|
|
|
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
|
|
|
|
unsigned int cpu) \
|
2010-02-19 01:37:14 +07:00
|
|
|
{ \
|
2010-04-22 21:07:02 +07:00
|
|
|
return show_cache_disable(this_leaf, buf, slot); \
|
2010-02-19 01:37:14 +07:00
|
|
|
}
|
|
|
|
SHOW_CACHE_DISABLE(0)
|
|
|
|
SHOW_CACHE_DISABLE(1)
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
|
2010-04-22 21:07:02 +07:00
|
|
|
unsigned slot, unsigned long idx)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
idx |= BIT(30);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* disable index in all 4 subcaches
|
|
|
|
*/
|
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
u32 reg = idx | (i << 20);
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
if (!nb->l3_cache.subcaches[i])
|
2010-04-22 21:07:02 +07:00
|
|
|
continue;
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
|
2010-04-22 21:07:02 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We need to WBINVD on a core on the node containing the L3
|
|
|
|
* cache which indices we disable therefore a simple wbinvd()
|
|
|
|
* is not sufficient.
|
|
|
|
*/
|
|
|
|
wbinvd_on_cpu(cpu);
|
|
|
|
|
|
|
|
reg |= BIT(31);
|
2011-07-24 16:46:09 +07:00
|
|
|
pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
|
2010-04-22 21:07:02 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-06-02 23:18:40 +07:00
|
|
|
/*
|
|
|
|
* disable a L3 cache index by using a disable-slot
|
|
|
|
*
|
|
|
|
* @l3: L3 cache descriptor
|
|
|
|
* @cpu: A CPU on the node containing the L3 cache
|
|
|
|
* @slot: slot number (0..1)
|
|
|
|
* @index: index to disable
|
|
|
|
*
|
|
|
|
* @return: 0 on success, error status on failure
|
|
|
|
*/
|
2011-07-24 16:46:09 +07:00
|
|
|
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
|
2010-06-02 23:18:40 +07:00
|
|
|
unsigned long index)
|
2010-02-19 01:37:14 +07:00
|
|
|
{
|
2010-06-02 23:18:40 +07:00
|
|
|
int ret = 0;
|
2010-02-19 01:37:14 +07:00
|
|
|
|
2011-05-16 20:39:47 +07:00
|
|
|
/* check if @slot is already used or the index is already disabled */
|
2011-07-24 16:46:09 +07:00
|
|
|
ret = amd_get_l3_disable_slot(nb, slot);
|
2010-06-02 23:18:40 +07:00
|
|
|
if (ret >= 0)
|
2012-04-19 17:35:08 +07:00
|
|
|
return -EEXIST;
|
2010-02-19 01:37:14 +07:00
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
if (index > nb->l3_cache.indices)
|
2010-06-02 23:18:40 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2011-05-16 20:39:47 +07:00
|
|
|
/* check whether the other slot has disabled the same index already */
|
2011-07-24 16:46:09 +07:00
|
|
|
if (index == amd_get_l3_disable_slot(nb, !slot))
|
2012-04-19 17:35:08 +07:00
|
|
|
return -EEXIST;
|
2010-06-02 23:18:40 +07:00
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
amd_l3_disable_index(nb, cpu, slot, index);
|
2010-06-02 23:18:40 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
|
|
|
|
const char *buf, size_t count,
|
|
|
|
unsigned int slot)
|
|
|
|
{
|
|
|
|
unsigned long val = 0;
|
|
|
|
int cpu, err = 0;
|
|
|
|
|
2010-02-19 01:37:14 +07:00
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
|
2010-02-19 01:37:14 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2010-06-02 23:18:40 +07:00
|
|
|
cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
|
2010-02-19 01:37:14 +07:00
|
|
|
|
2014-08-09 04:24:03 +07:00
|
|
|
if (kstrtoul(buf, 10, &val) < 0)
|
2010-02-19 01:37:14 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
|
2010-06-02 23:18:40 +07:00
|
|
|
if (err) {
|
|
|
|
if (err == -EEXIST)
|
2012-04-19 17:35:08 +07:00
|
|
|
pr_warning("L3 slot %d in use/index already disabled!\n",
|
|
|
|
slot);
|
2010-06-02 23:18:40 +07:00
|
|
|
return err;
|
|
|
|
}
|
2010-02-19 01:37:14 +07:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2010-04-22 21:07:02 +07:00
|
|
|
#define STORE_CACHE_DISABLE(slot) \
|
2010-02-19 01:37:14 +07:00
|
|
|
static ssize_t \
|
2010-04-22 21:07:02 +07:00
|
|
|
store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
|
2011-02-08 00:10:39 +07:00
|
|
|
const char *buf, size_t count, \
|
|
|
|
unsigned int cpu) \
|
2010-02-19 01:37:14 +07:00
|
|
|
{ \
|
2010-04-22 21:07:02 +07:00
|
|
|
return store_cache_disable(this_leaf, buf, count, slot); \
|
2008-07-19 04:03:52 +07:00
|
|
|
}
|
2010-02-19 01:37:14 +07:00
|
|
|
STORE_CACHE_DISABLE(0)
|
|
|
|
STORE_CACHE_DISABLE(1)
|
|
|
|
|
|
|
|
static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
|
|
|
|
show_cache_disable_0, store_cache_disable_0);
|
|
|
|
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
|
|
|
|
show_cache_disable_1, store_cache_disable_1);
|
|
|
|
|
2011-02-08 00:10:39 +07:00
|
|
|
static ssize_t
|
|
|
|
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
|
|
|
|
{
|
2011-07-24 16:46:09 +07:00
|
|
|
if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
2011-02-08 00:10:39 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
|
|
|
|
unsigned int cpu)
|
|
|
|
{
|
|
|
|
unsigned long val;
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
2011-07-24 16:46:09 +07:00
|
|
|
if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
|
2011-02-08 00:10:39 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2014-08-09 04:24:03 +07:00
|
|
|
if (kstrtoul(buf, 16, &val) < 0)
|
2011-02-08 00:10:39 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (amd_set_subcaches(cpu, val))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct _cache_attr subcaches =
|
|
|
|
__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
|
|
|
|
|
2013-02-04 16:13:15 +07:00
|
|
|
#else
|
2010-10-29 22:14:32 +07:00
|
|
|
#define amd_init_l3_cache(x, y)
|
2013-02-04 16:13:15 +07:00
|
|
|
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
|
2008-07-19 04:03:52 +07:00
|
|
|
|
2008-07-21 18:34:21 +07:00
|
|
|
static int
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2011-02-08 00:10:39 +07:00
|
|
|
union _cpuid4_leaf_eax eax;
|
|
|
|
union _cpuid4_leaf_ebx ebx;
|
|
|
|
union _cpuid4_leaf_ecx ecx;
|
2006-06-26 18:56:13 +07:00
|
|
|
unsigned edx;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-07-19 04:03:52 +07:00
|
|
|
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
|
2012-10-19 16:00:49 +07:00
|
|
|
if (cpu_has_topoext)
|
|
|
|
cpuid_count(0x8000001d, index, &eax.full,
|
|
|
|
&ebx.full, &ecx.full, &edx);
|
|
|
|
else
|
|
|
|
amd_cpuid4(index, &eax, &ebx, &ecx);
|
2010-10-29 22:14:32 +07:00
|
|
|
amd_init_l3_cache(this_leaf, index);
|
2008-07-21 18:34:21 +07:00
|
|
|
} else {
|
|
|
|
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
|
|
|
|
}
|
|
|
|
|
2006-06-26 18:56:13 +07:00
|
|
|
if (eax.split.type == CACHE_TYPE_NULL)
|
2005-07-29 11:15:46 +07:00
|
|
|
return -EIO; /* better error ? */
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-26 18:56:13 +07:00
|
|
|
this_leaf->eax = eax;
|
|
|
|
this_leaf->ebx = ebx;
|
|
|
|
this_leaf->ecx = ecx;
|
2008-07-21 18:34:21 +07:00
|
|
|
this_leaf->size = (ecx.split.number_of_sets + 1) *
|
|
|
|
(ebx.split.coherency_line_size + 1) *
|
|
|
|
(ebx.split.physical_line_partition + 1) *
|
|
|
|
(ebx.split.ways_of_associativity + 1);
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2012-10-19 15:59:33 +07:00
|
|
|
unsigned int eax, ebx, ecx, edx, op;
|
2005-04-17 05:20:36 +07:00
|
|
|
union _cpuid4_leaf_eax cache_eax;
|
2005-10-31 05:59:30 +07:00
|
|
|
int i = -1;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2012-10-19 15:59:33 +07:00
|
|
|
if (c->x86_vendor == X86_VENDOR_AMD)
|
|
|
|
op = 0x8000001d;
|
|
|
|
else
|
|
|
|
op = 4;
|
|
|
|
|
2005-10-31 05:59:30 +07:00
|
|
|
do {
|
|
|
|
++i;
|
2012-10-19 15:59:33 +07:00
|
|
|
/* Do cpuid(op) loop to find out num_cache_leaves */
|
|
|
|
cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
|
2005-04-17 05:20:36 +07:00
|
|
|
cache_eax.full = eax;
|
2005-10-31 05:59:30 +07:00
|
|
|
} while (cache_eax.split.type != CACHE_TYPE_NULL);
|
|
|
|
return i;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
|
2012-10-19 15:59:33 +07:00
|
|
|
{
|
|
|
|
|
|
|
|
if (cpu_has_topoext) {
|
|
|
|
num_cache_leaves = find_num_cache_leaves(c);
|
|
|
|
} else if (c->extended_cpuid_level >= 0x80000006) {
|
|
|
|
if (cpuid_edx(0x80000006) & 0xf000)
|
|
|
|
num_cache_leaves = 4;
|
|
|
|
else
|
|
|
|
num_cache_leaves = 3;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2009-07-04 06:35:45 +07:00
|
|
|
/* Cache sizes */
|
|
|
|
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
|
2005-04-17 05:20:36 +07:00
|
|
|
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
|
|
|
|
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
|
2006-03-27 16:15:22 +07:00
|
|
|
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
|
2006-06-27 16:53:49 +07:00
|
|
|
#ifdef CONFIG_X86_HT
|
2007-10-20 01:35:04 +07:00
|
|
|
unsigned int cpu = c->cpu_index;
|
2006-03-27 16:15:22 +07:00
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-03-23 17:59:52 +07:00
|
|
|
if (c->cpuid_level > 3) {
|
2005-04-17 05:20:36 +07:00
|
|
|
static int is_initialized;
|
|
|
|
|
|
|
|
if (is_initialized == 0) {
|
|
|
|
/* Init num_cache_leaves from boot CPU */
|
2012-10-19 15:59:33 +07:00
|
|
|
num_cache_leaves = find_num_cache_leaves(c);
|
2005-04-17 05:20:36 +07:00
|
|
|
is_initialized++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Whenever possible use cpuid(4), deterministic cache
|
|
|
|
* parameters cpuid leaf to find the cache details
|
|
|
|
*/
|
|
|
|
for (i = 0; i < num_cache_leaves; i++) {
|
2013-06-08 23:48:15 +07:00
|
|
|
struct _cpuid4_info_regs this_leaf = {};
|
2005-04-17 05:20:36 +07:00
|
|
|
int retval;
|
|
|
|
|
2009-01-11 12:58:10 +07:00
|
|
|
retval = cpuid4_cache_lookup_regs(i, &this_leaf);
|
2013-06-08 23:48:15 +07:00
|
|
|
if (retval < 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
switch (this_leaf.eax.split.level) {
|
|
|
|
case 1:
|
|
|
|
if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
|
|
|
|
new_l1d = this_leaf.size/1024;
|
|
|
|
else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
|
|
|
|
new_l1i = this_leaf.size/1024;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
new_l2 = this_leaf.size/1024;
|
|
|
|
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
|
|
|
|
index_msb = get_count_order(num_threads_sharing);
|
|
|
|
l2_id = c->apicid & ~((1 << index_msb) - 1);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
new_l3 = this_leaf.size/1024;
|
|
|
|
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
|
|
|
|
index_msb = get_count_order(num_threads_sharing);
|
|
|
|
l3_id = c->apicid & ~((1 << index_msb) - 1);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2006-03-27 16:15:24 +07:00
|
|
|
/*
|
|
|
|
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
|
|
|
|
* trace cache
|
|
|
|
*/
|
|
|
|
if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
|
2005-04-17 05:20:36 +07:00
|
|
|
/* supports eax=2 call */
|
2008-02-01 04:05:43 +07:00
|
|
|
int j, n;
|
|
|
|
unsigned int regs[4];
|
2005-04-17 05:20:36 +07:00
|
|
|
unsigned char *dp = (unsigned char *)regs;
|
2006-03-27 16:15:24 +07:00
|
|
|
int only_trace = 0;
|
|
|
|
|
|
|
|
if (num_cache_leaves != 0 && c->x86 == 15)
|
|
|
|
only_trace = 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* Number of times to iterate */
|
|
|
|
n = cpuid_eax(2) & 0xFF;
|
|
|
|
|
2009-07-04 06:35:45 +07:00
|
|
|
for (i = 0 ; i < n ; i++) {
|
2005-04-17 05:20:36 +07:00
|
|
|
cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
|
|
|
|
|
|
|
|
/* If bit 31 is set, this is an unknown format */
|
2009-07-04 06:35:45 +07:00
|
|
|
for (j = 0 ; j < 3 ; j++)
|
|
|
|
if (regs[j] & (1 << 31))
|
|
|
|
regs[j] = 0;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* Byte 0 is level count, not a descriptor */
|
2009-07-04 06:35:45 +07:00
|
|
|
for (j = 1 ; j < 16 ; j++) {
|
2005-04-17 05:20:36 +07:00
|
|
|
unsigned char des = dp[j];
|
|
|
|
unsigned char k = 0;
|
|
|
|
|
|
|
|
/* look up this descriptor in the table */
|
2009-07-04 06:35:45 +07:00
|
|
|
while (cache_table[k].descriptor != 0) {
|
2005-04-17 05:20:36 +07:00
|
|
|
if (cache_table[k].descriptor == des) {
|
2006-03-27 16:15:24 +07:00
|
|
|
if (only_trace && cache_table[k].cache_type != LVL_TRACE)
|
|
|
|
break;
|
2005-04-17 05:20:36 +07:00
|
|
|
switch (cache_table[k].cache_type) {
|
|
|
|
case LVL_1_INST:
|
|
|
|
l1i += cache_table[k].size;
|
|
|
|
break;
|
|
|
|
case LVL_1_DATA:
|
|
|
|
l1d += cache_table[k].size;
|
|
|
|
break;
|
|
|
|
case LVL_2:
|
|
|
|
l2 += cache_table[k].size;
|
|
|
|
break;
|
|
|
|
case LVL_3:
|
|
|
|
l3 += cache_table[k].size;
|
|
|
|
break;
|
|
|
|
case LVL_TRACE:
|
|
|
|
trace += cache_table[k].size;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
k++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2006-03-27 16:15:24 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-03-27 16:15:24 +07:00
|
|
|
if (new_l1d)
|
|
|
|
l1d = new_l1d;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-03-27 16:15:24 +07:00
|
|
|
if (new_l1i)
|
|
|
|
l1i = new_l1i;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-03-27 16:15:24 +07:00
|
|
|
if (new_l2) {
|
|
|
|
l2 = new_l2;
|
2006-06-27 16:53:49 +07:00
|
|
|
#ifdef CONFIG_X86_HT
|
2007-10-20 01:35:03 +07:00
|
|
|
per_cpu(cpu_llc_id, cpu) = l2_id;
|
2006-03-27 16:15:22 +07:00
|
|
|
#endif
|
2006-03-27 16:15:24 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-03-27 16:15:24 +07:00
|
|
|
if (new_l3) {
|
|
|
|
l3 = new_l3;
|
2006-06-27 16:53:49 +07:00
|
|
|
#ifdef CONFIG_X86_HT
|
2007-10-20 01:35:03 +07:00
|
|
|
per_cpu(cpu_llc_id, cpu) = l3_id;
|
2006-03-27 16:15:22 +07:00
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2014-07-22 20:35:14 +07:00
|
|
|
#ifdef CONFIG_X86_HT
|
|
|
|
/*
|
|
|
|
* If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
|
|
|
|
* turns means that the only possibility is SMT (as indicated in
|
|
|
|
* cpuid1). Since cpuid2 doesn't specify shared caches, and we know
|
|
|
|
* that SMT shares all caches, we can unconditionally set cpu_llc_id to
|
|
|
|
* c->phys_proc_id.
|
|
|
|
*/
|
|
|
|
if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
|
|
|
|
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
|
|
|
|
#endif
|
|
|
|
|
2006-03-27 16:15:24 +07:00
|
|
|
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
return l2;
|
|
|
|
}
|
|
|
|
|
2008-10-19 02:24:45 +07:00
|
|
|
#ifdef CONFIG_SYSFS
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* pointer to _cpuid4_info array (for each cache leaf) */
|
2009-10-29 20:34:14 +07:00
|
|
|
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
|
|
|
|
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
2012-02-09 02:52:29 +07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2012-02-09 02:52:29 +07:00
|
|
|
struct _cpuid4_info *this_leaf;
|
2012-10-19 16:02:09 +07:00
|
|
|
int i, sibling;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2012-10-19 16:02:09 +07:00
|
|
|
if (cpu_has_topoext) {
|
|
|
|
unsigned int apicid, nshared, first, last;
|
|
|
|
|
|
|
|
if (!per_cpu(ici_cpuid4_info, cpu))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
this_leaf = CPUID4_INFO_IDX(cpu, index);
|
|
|
|
nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
|
|
|
|
apicid = cpu_data(cpu).apicid;
|
|
|
|
first = apicid - (apicid % nshared);
|
|
|
|
last = first + nshared - 1;
|
|
|
|
|
|
|
|
for_each_online_cpu(i) {
|
|
|
|
apicid = cpu_data(i).apicid;
|
|
|
|
if ((apicid < first) || (apicid > last))
|
|
|
|
continue;
|
2009-10-29 20:34:14 +07:00
|
|
|
if (!per_cpu(ici_cpuid4_info, i))
|
2009-09-03 14:41:19 +07:00
|
|
|
continue;
|
|
|
|
this_leaf = CPUID4_INFO_IDX(i, index);
|
2012-10-19 16:02:09 +07:00
|
|
|
|
|
|
|
for_each_online_cpu(sibling) {
|
|
|
|
apicid = cpu_data(sibling).apicid;
|
|
|
|
if ((apicid < first) || (apicid > last))
|
2009-12-10 01:36:45 +07:00
|
|
|
continue;
|
|
|
|
set_bit(sibling, this_leaf->shared_cpu_map);
|
|
|
|
}
|
2009-09-03 14:41:19 +07:00
|
|
|
}
|
2012-10-19 16:02:09 +07:00
|
|
|
} else if (index == 3) {
|
|
|
|
for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
|
2012-02-09 02:52:29 +07:00
|
|
|
if (!per_cpu(ici_cpuid4_info, i))
|
|
|
|
continue;
|
|
|
|
this_leaf = CPUID4_INFO_IDX(i, index);
|
2012-10-19 16:02:09 +07:00
|
|
|
for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
|
2012-02-09 02:52:29 +07:00
|
|
|
if (!cpu_online(sibling))
|
|
|
|
continue;
|
|
|
|
set_bit(sibling, this_leaf->shared_cpu_map);
|
|
|
|
}
|
|
|
|
}
|
2012-10-19 16:02:09 +07:00
|
|
|
} else
|
|
|
|
return 0;
|
2012-02-09 02:52:29 +07:00
|
|
|
|
2012-10-19 16:02:09 +07:00
|
|
|
return 1;
|
2012-02-09 02:52:29 +07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))
			return;
	}

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
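
/*
 * Undo cache_shared_cpu_map_setup() for one leaf: clear this CPU out of
 * the shared_cpu_map of every sibling that still references it.
 */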
static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}

#else
static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
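
/*
 * Drop this CPU from all sibling shared_cpu_maps, then free and clear
 * its _cpuid4_info array.
 */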
static void free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}
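
/*
 * Runs on the target CPU (via smp_call_function_single() below):
 * enumerate each cache leaf with CPUID, fill in the per-CPU
 * _cpuid4_info array and build the shared-CPU maps, unwinding the
 * maps built so far if a lookup fails.
 */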
static void get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}
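
/*
 * Allocate the per-CPU _cpuid4_info array and have the target CPU
 * populate it; the allocation is released again if the remote
 * enumeration fails.
 */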
static int detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu) \
{ \
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
	int ret;

	if (type)
		ret = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
				cpumask_pr_args(mask));
	else
		ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb",
				cpumask_pr_args(mask));
	buf[ret++] = '\n';
	buf[ret] = '\0';
	return ret;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};

#ifdef CONFIG_AMD_NB
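/*
 * Build the attribute list for AMD L3 caches: start from default_attrs
 * and append the cache-disable and subcache-partitioning attributes
 * when the northbridge supports them. The result is cached in a static
 * pointer, as it is identical for every L3 leaf.
 */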
static struct attribute **amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops = &sysfs_ops,
};
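
/* Release the kobjects and cacheinfo state allocated for one CPU. */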
static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
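
/*
 * Detect the cache attributes for one CPU and allocate the kobjects
 * backing cpuX/cache and its indexY subdirectories; cleans up after
 * itself and returns -ENOMEM on allocation failure.
 */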
static int cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
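
/*
 * Register the cpuX/cache hierarchy for one CPU device: a "cache"
 * kobject per CPU plus an "indexY" kobject per cache leaf, switching
 * to the extended AMD L3 attributes where the leaf has a northbridge.
 */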
static int cache_add_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
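
/* Tear down the cpuX/cache hierarchy registered by cache_add_dev(). */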
static void cache_remove_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
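
/*
 * CPU hotplug notifier: add the cache interface when a CPU comes
 * online and remove it again when the CPU dies.
 */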
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
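
/*
 * Register the cache interface for every CPU that is already online
 * and hook up the hotplug notifier for the rest, under the CPU
 * notifier lock so no CPU can slip in between the two steps.
 */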
static int __init cache_sysfs_init(void)
{
	int i, err = 0;

	if (num_cache_leaves == 0)
		return 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);
		if (err)
			goto out;
	}
	__register_hotcpu_notifier(&cacheinfo_cpu_notifier);

out:
	cpu_notifier_register_done();
	return err;
}

device_initcall(cache_sysfs_init);

#endif