2018-06-23 05:08:41 +07:00
|
|
|
/* cpu_feature_enabled() cannot be used this early */
|
|
|
|
#define USE_EARLY_PGTABLE_L5
|
|
|
|
|
2008-09-05 10:09:00 +07:00
|
|
|
#include <linux/bootmem.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <linux/linkage.h>
|
2008-09-05 10:09:00 +07:00
|
|
|
#include <linux/bitops.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <linux/kernel.h>
|
2016-07-14 07:18:56 +07:00
|
|
|
#include <linux/export.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/string.h>
|
x86/cpu: Trim model ID whitespace
We did try trimming whitespace surrounding the 'model name'
field in /proc/cpuinfo since reportedly some userspace uses it
in string comparisons and there were discrepancies:
[thetango@prarit ~]# grep "^model name" /proc/cpuinfo | uniq -c | sed 's/\ /_/g'
______1_model_name :_AMD_Opteron(TM)_Processor_6272
_____63_model_name :_AMD_Opteron(TM)_Processor_6272_________________
However, there were issues with overlapping buffers, string
sizes and non-byte-sized copies in the previous proposed
solutions; see Link tags below for the whole farce.
So, instead of diddling with this more, let's simply extend what
was there originally with trimming any present trailing
whitespace. Final result is really simple and obvious.
Testing with the most insane model IDs qemu can generate, looks
good:
.model_id = " My funny model ID CPU ",
______4_model_name :_My_funny_model_ID_CPU
.model_id = "My funny model ID CPU ",
______4_model_name :_My_funny_model_ID_CPU
.model_id = " My funny model ID CPU",
______4_model_name :_My_funny_model_ID_CPU
.model_id = " ",
______4_model_name :__
.model_id = "",
______4_model_name :_15/02
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1432050210-32036-1-git-send-email-prarit@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-06-01 17:06:57 +07:00
|
|
|
#include <linux/ctype.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/delay.h>
|
2017-02-02 01:08:20 +07:00
|
|
|
#include <linux/sched/mm.h>
|
2017-02-01 22:36:40 +07:00
|
|
|
#include <linux/sched/clock.h>
|
2017-02-04 07:20:53 +07:00
|
|
|
#include <linux/sched/task.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <linux/init.h>
|
2014-04-17 15:17:12 +07:00
|
|
|
#include <linux/kprobes.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <linux/kgdb.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/smp.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <linux/io.h>
|
2015-07-21 04:47:58 +07:00
|
|
|
#include <linux/syscore_ops.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
|
|
|
|
#include <asm/stackprotector.h>
|
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!
In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.
Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and
less appropriate.
All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI compatible fashion)
The word 'event' is also a bit shorter than 'counter' - which makes
it slightly more convenient to write/handle as well.
Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.
User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)
This patch has been generated via the following script:
FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')
sed -i \
-e 's/PERF_EVENT_/PERF_RECORD_/g' \
-e 's/PERF_COUNTER/PERF_EVENT/g' \
-e 's/perf_counter/perf_event/g' \
-e 's/nb_counters/nb_events/g' \
-e 's/swcounter/swevent/g' \
-e 's/tpcounter_event/tp_event/g' \
$FILES
for N in $(find . -name perf_counter.[ch]); do
M=$(echo $N | sed 's/perf_counter/perf_event/g')
mv $N $M
done
FILES=$(find . -name perf_event.*)
sed -i \
-e 's/COUNTER_MASK/REG_MASK/g' \
-e 's/COUNTER/EVENT/g' \
-e 's/\<event\>/event_id/g' \
-e 's/counter/event/g' \
-e 's/Counter/Event/g' \
$FILES
... to keep it as correct as possible. This script can also be
used by anyone who has pending perfcounters patches - it converts
a Linux kernel tree over to the new naming. We tried to time this
change to the point in time where the amount of pending patches
is the smallest: the end of the merge window.
Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.
( NOTE: 'counters' are still the proper terminology when we deal
with hardware registers - and these sed scripts are a bit
over-eager in renaming them. I've undone some of that, but
in case there's something left where 'counter' would be
better than 'event' we can undo that on an individual basis
instead of touching an otherwise nicely automated patch. )
Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-09-21 17:02:48 +07:00
|
|
|
#include <asm/perf_event.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/mmu_context.h>
|
2011-08-01 04:02:19 +07:00
|
|
|
#include <asm/archrandom.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
#include <asm/processor.h>
|
2014-10-25 05:58:08 +07:00
|
|
|
#include <asm/tlbflush.h>
|
2012-01-21 04:24:09 +07:00
|
|
|
#include <asm/debugreg.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <asm/sections.h>
|
2014-05-06 02:19:36 +07:00
|
|
|
#include <asm/vsyscall.h>
|
2009-07-04 06:35:45 +07:00
|
|
|
#include <linux/topology.h>
|
|
|
|
#include <linux/cpumask.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <asm/pgtable.h>
|
2011-07-27 06:09:06 +07:00
|
|
|
#include <linux/atomic.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <asm/proto.h>
|
|
|
|
#include <asm/setup.h>
|
|
|
|
#include <asm/apic.h>
|
|
|
|
#include <asm/desc.h>
|
2015-04-24 07:54:44 +07:00
|
|
|
#include <asm/fpu/internal.h>
|
2006-06-23 16:04:18 +07:00
|
|
|
#include <asm/mtrr.h>
|
2017-01-20 20:22:34 +07:00
|
|
|
#include <asm/hwcap2.h>
|
2009-07-04 06:35:45 +07:00
|
|
|
#include <linux/numa.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <asm/asm.h>
|
x86/mm/mpx: Work around MPX erratum SKD046
This erratum essentially causes the CPU to forget which privilege
level it is operating on (kernel vs. user) for the purposes of MPX.
This erratum can only be triggered when a system is not using
Supervisor Mode Execution Prevention (SMEP). Our workaround for
the erratum is to ensure that MPX can only be used in cases where
SMEP is present in the processor and is enabled.
This erratum only affects Core processors. Atom is unaffected.
But, there is no architectural way to determine Atom vs. Core.
So, we just apply this workaround to all processors. It's
possible that it will mistakenly disable MPX on some Atom
processors or future unaffected Core processors. There are
currently no processors that have MPX and not SMEP. It would
take something akin to a hypervisor masking SMEP out on an Atom
processor for this to present itself on current hardware.
More details can be found at:
http://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/desktop-6th-gen-core-family-spec-update.pdf
"
SKD046 Branch Instructions May Initialize MPX Bound Registers Incorrectly
Problem:
Depending on the current Intel MPX (Memory Protection
Extensions) configuration, execution of certain branch
instructions (near CALL, near RET, near JMP, and Jcc
instructions) without a BND prefix (F2H) initialize the MPX bound
registers. Due to this erratum, such a branch instruction that is
executed both with CPL = 3 and with CPL < 3 may not use the
correct MPX configuration register (BNDCFGU or BNDCFGS,
respectively) for determining whether to initialize the bound
registers; it may thus initialize the bound registers when it
should not, or fail to initialize them when it should.
Implication:
A branch instruction that has executed both in user mode and in
supervisor mode (from the same linear address) may cause a #BR
(bound range fault) when it should not have or may not cause a
#BR when it should have. Workaround An operating system can
avoid this erratum by setting CR4.SMEP[bit 20] to enable
supervisor-mode execution prevention (SMEP). When SMEP is
enabled, no code can be executed both with CPL = 3 and with CPL < 3.
"
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20160512220400.3B35F1BC@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-05-13 05:04:00 +07:00
|
|
|
#include <asm/bugs.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <asm/cpu.h>
|
2006-06-23 16:04:20 +07:00
|
|
|
#include <asm/mce.h>
|
2009-03-14 12:49:49 +07:00
|
|
|
#include <asm/msr.h>
|
2008-05-08 14:18:43 +07:00
|
|
|
#include <asm/pat.h>
|
2012-12-21 14:44:23 +07:00
|
|
|
#include <asm/microcode.h>
|
|
|
|
#include <asm/microcode_intel.h>
|
2018-01-25 23:14:13 +07:00
|
|
|
#include <asm/intel-family.h>
|
|
|
|
#include <asm/cpu_device_id.h>
|
2009-02-17 20:02:01 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
2009-01-21 15:26:06 +07:00
|
|
|
#include <asm/uv/uv.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#include "cpu.h"
|
|
|
|
|
2017-01-20 20:22:34 +07:00
|
|
|
/*
 * Extra hardware capability bits; NOTE(review): presumably exported to user
 * space as the AT_HWCAP2 auxv entry — confirm against the ELF loader.
 */
u32 elf_hwcap2 __read_mostly;
|
|
|
|
|
2009-01-04 20:18:03 +07:00
|
|
|
/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;
|
|
|
|
|
2018-04-28 04:34:34 +07:00
|
|
|
/* Number of siblings per CPU package; 1 until SMP topology detection runs. */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
|
|
|
|
|
|
|
|
/* Last level cache ID of each logical CPU; BAD_APICID until detected. */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
|
|
|
|
|
2009-01-27 10:56:47 +07:00
|
|
|
/* correctly size the local cpu masks */
|
2009-01-27 18:03:24 +07:00
|
|
|
void __init setup_cpu_local_masks(void)
|
2009-01-27 10:56:47 +07:00
|
|
|
{
|
|
|
|
alloc_bootmem_cpumask_var(&cpu_initialized_mask);
|
|
|
|
alloc_bootmem_cpumask_var(&cpu_callin_mask);
|
|
|
|
alloc_bootmem_cpumask_var(&cpu_callout_mask);
|
|
|
|
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
|
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * Fallback ->c_init routine, used when no vendor-specific cpu_dev handler
 * applies (see default_cpu below).
 */
static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/* Catch-all cpu_dev used until/unless a vendor-specific one is selected. */
static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/* cpu_dev for the running CPU's vendor; points at default_cpu until detection. */
static const struct cpu_dev *this_cpu = &default_cpu;
|
2008-09-05 02:09:47 +07:00
|
|
|
|
2009-01-21 15:26:05 +07:00
|
|
|
/* Per-CPU, page-aligned Global Descriptor Table. */
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
|
2007-05-03 00:27:10 +07:00
|
|
|
|
2015-06-08 01:37:02 +07:00
|
|
|
static int __init x86_mpx_setup(char *s)
|
2009-05-23 02:17:45 +07:00
|
|
|
{
|
2015-06-08 01:37:02 +07:00
|
|
|
/* require an exact match without trailing characters */
|
2014-11-12 05:01:33 +07:00
|
|
|
if (strlen(s))
|
|
|
|
return 0;
|
2009-05-23 02:17:45 +07:00
|
|
|
|
2015-06-08 01:37:02 +07:00
|
|
|
/* do not emit a message if the feature is not present */
|
|
|
|
if (!boot_cpu_has(X86_FEATURE_MPX))
|
|
|
|
return 1;
|
2010-07-20 06:05:52 +07:00
|
|
|
|
2015-06-08 01:37:02 +07:00
|
|
|
setup_clear_cpu_cap(X86_FEATURE_MPX);
|
|
|
|
pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
|
2014-05-30 01:12:31 +07:00
|
|
|
return 1;
|
|
|
|
}
|
2015-06-08 01:37:02 +07:00
|
|
|
__setup("nompx", x86_mpx_setup);
|
2014-05-30 01:12:31 +07:00
|
|
|
|
2017-06-29 22:53:20 +07:00
|
|
|
#ifdef CONFIG_X86_64
|
2017-09-11 07:48:27 +07:00
|
|
|
static int __init x86_nopcid_setup(char *s)
|
2017-06-29 22:53:20 +07:00
|
|
|
{
|
2017-09-11 07:48:27 +07:00
|
|
|
/* nopcid doesn't accept parameters */
|
|
|
|
if (s)
|
|
|
|
return -EINVAL;
|
2017-06-29 22:53:20 +07:00
|
|
|
|
|
|
|
/* do not emit a message if the feature is not present */
|
|
|
|
if (!boot_cpu_has(X86_FEATURE_PCID))
|
2017-09-11 07:48:27 +07:00
|
|
|
return 0;
|
2017-06-29 22:53:20 +07:00
|
|
|
|
|
|
|
setup_clear_cpu_cap(X86_FEATURE_PCID);
|
|
|
|
pr_info("nopcid: PCID feature disabled\n");
|
2017-09-11 07:48:27 +07:00
|
|
|
return 0;
|
2017-06-29 22:53:20 +07:00
|
|
|
}
|
2017-09-11 07:48:27 +07:00
|
|
|
early_param("nopcid", x86_nopcid_setup);
|
2017-06-29 22:53:20 +07:00
|
|
|
#endif
|
|
|
|
|
2016-01-30 02:42:58 +07:00
|
|
|
static int __init x86_noinvpcid_setup(char *s)
|
|
|
|
{
|
|
|
|
/* noinvpcid doesn't accept parameters */
|
|
|
|
if (s)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* do not emit a message if the feature is not present */
|
|
|
|
if (!boot_cpu_has(X86_FEATURE_INVPCID))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
setup_clear_cpu_cap(X86_FEATURE_INVPCID);
|
|
|
|
pr_info("noinvpcid: INVPCID feature disabled\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("noinvpcid", x86_noinvpcid_setup);
|
|
|
|
|
2008-09-05 10:09:02 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/* Value parsed from the "cachesize=" boot option; -1 means no override. */
static int cachesize_override = -1;
/* PSN stays disabled unless "serialnumber" is given on the command line. */
static int disable_x86_serial_nr = 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-09-05 02:09:47 +07:00
|
|
|
/* Parse the "cachesize=" boot option into cachesize_override. */
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
|
|
|
|
|
|
|
|
/* "nosep" boot option: mask off the SEP (SYSENTER) feature bit. */
static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);
|
|
|
|
|
|
|
|
/* Standard macro to see if a specific flag is changeable */
/*
 * Probe whether @flag in EFLAGS can be toggled: save EFLAGS, flip the bit,
 * write it back, re-read EFLAGS, restore the original value, and compare.
 */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl \n\t"
		      "pushfl \n\t"
		      "popl %0 \n\t"
		      "movl %0, %1 \n\t"
		      "xorl %2, %0 \n\t"
		      "pushl %0 \n\t"
		      "popfl \n\t"
		      "pushfl \n\t"
		      "popl %0 \n\t"
		      "popfl \n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	/* Nonzero iff the flag bit actually changed, i.e. it is writable. */
	return ((f1^f2) & flag) != 0;
}
|
|
|
|
|
|
|
|
/* Probe for the CPUID instruction */
/*
 * On 32-bit, CPUID exists iff the ID bit of EFLAGS can be toggled.
 */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * Turn off the processor serial number (PSN) unless the user explicitly
 * kept it enabled with the "serialnumber" boot option.
 */
static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;		/* set bit 21 to disable the serial number */
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}
|
|
|
|
|
|
|
|
/* "serialnumber" boot option: keep the processor serial number enabled. */
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
|
2008-09-05 10:09:02 +07:00
|
|
|
#else
|
2008-09-05 10:09:13 +07:00
|
|
|
/* Non-32-bit stub: the EFLAGS-toggle probe always reports changeable. */
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
|
|
|
|
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
|
|
|
|
{
|
|
|
|
}
|
2008-09-05 10:09:02 +07:00
|
|
|
#endif
|
2008-09-05 02:09:47 +07:00
|
|
|
|
2011-05-12 06:51:05 +07:00
|
|
|
/*
 * "nosmep" boot parameter: disable Supervisor Mode Execution Prevention.
 */
static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);

	/*
	 * Check for things that depend on SMEP being enabled:
	 * the MPX erratum (SKD046) workaround requires SMEP, so MPX
	 * availability must be re-evaluated with SMEP now cleared.
	 */
	check_mpx_erratum(&boot_cpu_data);
	return 1;
}
__setup("nosmep", setup_disable_smep);
|
|
|
|
|
2012-09-27 08:02:28 +07:00
|
|
|
/* Enable SMEP in CR4 if this CPU supports it. */
static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}
|
|
|
|
|
2012-09-22 02:43:13 +07:00
|
|
|
/*
 * "nosmap" boot parameter: disable Supervisor Mode Access Prevention.
 */
static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);
|
|
|
|
|
2012-09-27 08:02:28 +07:00
|
|
|
/*
 * Enable or disable SMAP in CR4 according to CPU support and the
 * CONFIG_X86_SMAP build option.
 */
static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		/*
		 * Explicitly clear in case SMAP was left enabled by a
		 * previous kernel (the build has SMAP support disabled).
		 */
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}
|
|
|
|
|
2017-11-06 09:27:54 +07:00
|
|
|
/*
 * Enable User Mode Instruction Prevention (UMIP) in CR4 when both the
 * build configuration and the current CPU support it; otherwise make
 * sure the CR4 bit is cleared.
 */
static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}
|
|
|
|
|
2016-02-13 04:02:29 +07:00
|
|
|
/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;	/* set by the "nopku" boot parameter */

/*
 * Enable Memory Protection Keys (CR4.PKE) when supported by the build,
 * the CPU, and not disabled on the command line.
 */
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set. We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
|
|
|
static __init int setup_disable_pku(char *arg)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Do not clear the X86_FEATURE_PKU bit. All of the
|
|
|
|
* runtime checks are against OSPKE so clearing the
|
|
|
|
* bit does nothing.
|
|
|
|
*
|
|
|
|
* This way, we will see "pku" in cpuinfo, but not
|
|
|
|
* "ospke", which is exactly what we want. It shows
|
|
|
|
* that the CPU has PKU, but the OS has not enabled it.
|
|
|
|
* This happens to be exactly how a system would look
|
|
|
|
* if we disabled the config option.
|
|
|
|
*/
|
|
|
|
pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
|
|
|
|
pku_disabled = true;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
__setup("nopku", setup_disable_pku);
|
|
|
|
#endif /* CONFIG_X86_64 */
|
|
|
|
|
2009-01-24 08:20:50 +07:00
|
|
|
/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software. Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;	/* X86_FEATURE_* bit that needs the level */
	u32 level;	/* minimum CPUID level required for the feature */
};

/* Zero-terminated table consumed by filter_cpuid_features(). */
static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * Clear feature bits whose required CPUID level (per the
 * cpuid_dependent_features table) is not actually reported by this CPU,
 * optionally warning about each disabled feature.
 */
static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}
|
2009-01-24 08:20:50 +07:00
|
|
|
|
2008-09-05 10:09:13 +07:00
|
|
|
/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the name;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	/* Scan the family-terminated legacy table for this CPU's family. */
	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}
|
|
|
|
|
2017-12-04 21:07:32 +07:00
|
|
|
/*
 * NOTE(review): presumably masks of capability bits forced cleared/set
 * on every CPU (applied elsewhere in this file) — confirm at the users.
 */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
|
2008-01-30 19:33:20 +07:00
|
|
|
|
2009-01-30 15:47:54 +07:00
|
|
|
/*
 * Point the per-cpu segment register at this CPU's per-cpu area:
 * %fs on 32-bit, GS base (via MSR) on 64-bit.
 */
void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	/* Null selector in %gs; the base comes from MSR_GS_BASE. */
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
	load_stack_canary_segment();
}
|
|
|
|
|
2017-12-04 21:07:20 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
/* The 32-bit entry code needs to find cpu_entry_area. */
|
|
|
|
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
|
|
|
|
#endif
|
|
|
|
|
2017-12-04 21:07:26 +07:00
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
/*
|
|
|
|
* Special IST stacks which the CPU switches to when it calls
|
|
|
|
* an IST-marked descriptor entry. Up to 7 stacks (hardware
|
|
|
|
* limit), all of them are 4K, except the debug stack which
|
|
|
|
* is 8K.
|
|
|
|
*/
|
|
|
|
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
|
|
|
|
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
|
|
|
|
[DEBUG_STACK - 1] = DEBUG_STKSZ
|
|
|
|
};
|
2017-03-15 00:05:08 +07:00
|
|
|
#endif
|
2017-03-15 00:05:07 +07:00
|
|
|
|
2017-03-15 00:05:08 +07:00
|
|
|
/* Load the original GDT from the per-cpu structure */
|
|
|
|
void load_direct_gdt(int cpu)
|
|
|
|
{
|
|
|
|
struct desc_ptr gdt_descr;
|
|
|
|
|
|
|
|
gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
|
|
|
|
gdt_descr.size = GDT_SIZE - 1;
|
|
|
|
load_gdt(&gdt_descr);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(load_direct_gdt);
|
|
|
|
|
2017-03-15 00:05:07 +07:00
|
|
|
/* Load a fixmap remapping of the per-cpu GDT */
|
|
|
|
void load_fixmap_gdt(int cpu)
|
|
|
|
{
|
|
|
|
struct desc_ptr gdt_descr;
|
|
|
|
|
|
|
|
gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
|
|
|
|
gdt_descr.size = GDT_SIZE - 1;
|
|
|
|
load_gdt(&gdt_descr);
|
|
|
|
}
|
2017-03-15 00:05:08 +07:00
|
|
|
EXPORT_SYMBOL_GPL(load_fixmap_gdt);
|
2017-03-15 00:05:07 +07:00
|
|
|
|
2009-03-14 14:46:17 +07:00
|
|
|
/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/* Per-vendor cpu_dev descriptors; slots are NULL until registered. */
static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
|
2005-04-17 05:20:36 +07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * Read the 48-byte brand/model string from CPUID leaves 0x80000002..4
 * into c->x86_model_id, then strip leading and trailing whitespace so
 * userspace string comparisons of "model name" are stable.
 */
static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	/* Fill the 48-byte model string with three 16-byte CPUID chunks. */
	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	/* Skip leading spaces. */
	while (*p == ' ')
		p++;

	/* Compact in place; 's' tracks where the last non-space landed. */
	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	/* Terminate just after the last non-whitespace character. */
	*(s + 1) = '\0';
}
|
|
|
|
|
2018-05-13 16:43:53 +07:00
|
|
|
/*
 * Set c->x86_max_cores from CPUID leaf 4; defaults to 1 when SMP is
 * disabled or the leaf is unavailable.
 */
void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;

	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	/* EAX[4:0] == 0 means leaf 4 reports no information here. */
	if (eax & 0x1f)
		/* Per the SDM, EAX[31:26] is max core IDs per package - 1. */
		c->x86_max_cores = (eax >> 26) + 1;
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * Determine L1/L2 cache (and on 64-bit, TLB) sizes from the extended
 * CPUID leaves 0x80000005/0x80000006, with 32-bit legacy overrides.
 */
void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		/* Sum of L1 data and instruction cache sizes (KB). */
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just has a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}
|
|
|
|
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, x86 CPU has 2 or 1 level TLB, first level is data TLB and
instruction TLB, second level is shared TLB for both data and instructions.
For huge page TLB, usually there is just one level and it is separated by 2MB/4MB
and 1GB.
Although each levels TLB size is important for performance tuning, but for
genernal and rude optimizing, last level TLB entry number is suitable. And
in fact, last level TLB always has the biggest entry number.
This patch will get the biggest TLB entry number and use it in furture TLB
optimizing.
According to Borislav's suggestion, except for the tlb_ll[i/d]_* arrays, other
function and data will be released after system boot up.
For all kinds of x86 vendor friendly, vendor specific code was moved to its
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 08:02:16 +07:00
|
|
|
/*
 * Last-level TLB entry counts per page size, filled in by the vendor
 * TLB detection code: lli = instruction TLB, lld = data TLB.
 */
u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, x86 CPU has 2 or 1 level TLB, first level is data TLB and
instruction TLB, second level is shared TLB for both data and instructions.
For huge page TLB, usually there is just one level and it is separated by 2MB/4MB
and 1GB.
Although each levels TLB size is important for performance tuning, but for
genernal and rude optimizing, last level TLB entry number is suitable. And
in fact, last level TLB always has the biggest entry number.
This patch will get the biggest TLB entry number and use it in furture TLB
optimizing.
Accroding Borislav's suggestion, except tlb_ll[i/d]_* array, other
function and data will be released after system boot up.
For all kinds of x86 vendor friendly, vendor specific code was moved to its
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 08:02:16 +07:00
|
|
|
|
x86, CPU: Fix trivial printk formatting issues with dmesg
dmesg (from util-linux) currently has two methods for reading the kernel
message ring buffer: /dev/kmsg and syslog(2). Since kernel 3.5.0 kmsg
has been the default, which escapes control characters (e.g. new lines)
before they are shown.
This change means that when dmesg is using /dev/kmsg, a 2 line printk
makes the output messy, because the second line does not get a
timestamp.
For example:
[ 0.012863] CPU0: Thermal monitoring enabled (TM1)
[ 0.012869] Last level iTLB entries: 4KB 1024, 2MB 1024, 4MB 1024
Last level dTLB entries: 4KB 1024, 2MB 1024, 4MB 1024, 1GB 4
[ 0.012958] Freeing SMP alternatives memory: 28K (ffffffff81d86000 - ffffffff81d8d000)
[ 0.014961] dmar: Host address width 39
Because printk.c intentionally escapes control characters, they should
not be there in the first place. This patch fixes two occurrences of
this.
Signed-off-by: Steven Honeyman <stevenhoneyman@gmail.com>
Link: https://lkml.kernel.org/r/1414856696-8094-1-git-send-email-stevenhoneyman@gmail.com
[ Boris: make cpu_detect_tlb() static, while at it. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
2014-11-06 05:52:18 +07:00
|
|
|
static void cpu_detect_tlb(struct cpuinfo_x86 *c)
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, x86 CPU has 2 or 1 level TLB, first level is data TLB and
instruction TLB, second level is shared TLB for both data and instructions.
For hupe page TLB, usually there is just one level and seperated by 2MB/4MB
and 1GB.
Although each levels TLB size is important for performance tuning, but for
genernal and rude optimizing, last level TLB entry number is suitable. And
in fact, last level TLB always has the biggest entry number.
This patch will get the biggest TLB entry number and use it in furture TLB
optimizing.
Accroding Borislav's suggestion, except tlb_ll[i/d]_* array, other
function and data will be released after system boot up.
For all kinds of x86 vendor friendly, vendor specific code was moved to its
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 08:02:16 +07:00
|
|
|
{
|
|
|
|
if (this_cpu->c_detect_tlb)
|
|
|
|
this_cpu->c_detect_tlb(c);
|
|
|
|
|
x86, CPU: Fix trivial printk formatting issues with dmesg
dmesg (from util-linux) currently has two methods for reading the kernel
message ring buffer: /dev/kmsg and syslog(2). Since kernel 3.5.0 kmsg
has been the default, which escapes control characters (e.g. new lines)
before they are shown.
This change means that when dmesg is using /dev/kmsg, a 2 line printk
makes the output messy, because the second line does not get a
timestamp.
For example:
[ 0.012863] CPU0: Thermal monitoring enabled (TM1)
[ 0.012869] Last level iTLB entries: 4KB 1024, 2MB 1024, 4MB 1024
Last level dTLB entries: 4KB 1024, 2MB 1024, 4MB 1024, 1GB 4
[ 0.012958] Freeing SMP alternatives memory: 28K (ffffffff81d86000 - ffffffff81d8d000)
[ 0.014961] dmar: Host address width 39
Because printk.c intentionally escapes control characters, they should
not be there in the first place. This patch fixes two occurrences of
this.
Signed-off-by: Steven Honeyman <stevenhoneyman@gmail.com>
Link: https://lkml.kernel.org/r/1414856696-8094-1-git-send-email-stevenhoneyman@gmail.com
[ Boris: make cpu_detect_tlb() static, while at it. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
2014-11-06 05:52:18 +07:00
|
|
|
pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, x86 CPU has 2 or 1 level TLB, first level is data TLB and
instruction TLB, second level is shared TLB for both data and instructions.
For hupe page TLB, usually there is just one level and seperated by 2MB/4MB
and 1GB.
Although each levels TLB size is important for performance tuning, but for
genernal and rude optimizing, last level TLB entry number is suitable. And
in fact, last level TLB always has the biggest entry number.
This patch will get the biggest TLB entry number and use it in furture TLB
optimizing.
Accroding Borislav's suggestion, except tlb_ll[i/d]_* array, other
function and data will be released after system boot up.
For all kinds of x86 vendor friendly, vendor specific code was moved to its
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 08:02:16 +07:00
|
|
|
tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
|
x86, CPU: Fix trivial printk formatting issues with dmesg
dmesg (from util-linux) currently has two methods for reading the kernel
message ring buffer: /dev/kmsg and syslog(2). Since kernel 3.5.0 kmsg
has been the default, which escapes control characters (e.g. new lines)
before they are shown.
This change means that when dmesg is using /dev/kmsg, a 2 line printk
makes the output messy, because the second line does not get a
timestamp.
For example:
[ 0.012863] CPU0: Thermal monitoring enabled (TM1)
[ 0.012869] Last level iTLB entries: 4KB 1024, 2MB 1024, 4MB 1024
Last level dTLB entries: 4KB 1024, 2MB 1024, 4MB 1024, 1GB 4
[ 0.012958] Freeing SMP alternatives memory: 28K (ffffffff81d86000 - ffffffff81d8d000)
[ 0.014961] dmar: Host address width 39
Because printk.c intentionally escapes control characters, they should
not be there in the first place. This patch fixes two occurrences of
this.
Signed-off-by: Steven Honeyman <stevenhoneyman@gmail.com>
Link: https://lkml.kernel.org/r/1414856696-8094-1-git-send-email-stevenhoneyman@gmail.com
[ Boris: make cpu_detect_tlb() static, while at it. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
2014-11-06 05:52:18 +07:00
|
|
|
tlb_lli_4m[ENTRIES]);
|
|
|
|
|
|
|
|
pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
|
|
|
|
tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
|
|
|
|
tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, x86 CPU has 2 or 1 level TLB, first level is data TLB and
instruction TLB, second level is shared TLB for both data and instructions.
For hupe page TLB, usually there is just one level and seperated by 2MB/4MB
and 1GB.
Although each levels TLB size is important for performance tuning, but for
genernal and rude optimizing, last level TLB entry number is suitable. And
in fact, last level TLB always has the biggest entry number.
This patch will get the biggest TLB entry number and use it in furture TLB
optimizing.
Accroding Borislav's suggestion, except tlb_ll[i/d]_* array, other
function and data will be released after system boot up.
For all kinds of x86 vendor friendly, vendor specific code was moved to its
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 08:02:16 +07:00
|
|
|
}
|
|
|
|
|
2018-06-06 05:53:57 +07:00
|
|
|
/*
 * Early Hyper-Threading detection: read the logical sibling count from
 * CPUID leaf 1 into smp_num_siblings.
 *
 * Returns 0 on success, -1 when HT topology information is absent or
 * superseded (AMD CMP_LEGACY, extended topology leaf).
 */
int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	/* Bail out when leaf 1 cannot provide usable sibling information. */
	if (!cpu_has(c, X86_FEATURE_HT) ||
	    cpu_has(c, X86_FEATURE_CMP_LEGACY) ||
	    cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	/* EBX bits 23:16 hold the logical processor count. */
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}
|
2008-09-05 02:09:44 +07:00
|
|
|
|
2018-06-06 05:53:57 +07:00
|
|
|
/*
 * Derive c->phys_proc_id and c->cpu_core_id from the APIC ID, using the
 * sibling count that detect_ht_early() stored in smp_num_siblings.
 */
void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;

	/* Package ID: APIC ID shifted past all sibling bits. */
	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	/* Reduce total siblings to threads per core. */
	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);
	core_bits = get_count_order(c->x86_max_cores);

	/* Core ID: the core_bits-wide field above the thread bits. */
	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
		((1 << core_bits) - 1);
#endif
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static void get_cpu_vendor(struct cpuinfo_x86 *c)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
char *v = c->x86_vendor_id;
|
2009-03-14 14:46:17 +07:00
|
|
|
int i;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
for (i = 0; i < X86_VENDOR_NUM; i++) {
|
2008-09-05 02:09:45 +07:00
|
|
|
if (!cpu_devs[i])
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
|
|
|
|
(cpu_devs[i]->c_ident[1] &&
|
|
|
|
!strcmp(v, cpu_devs[i]->c_ident[1]))) {
|
2009-03-14 14:46:17 +07:00
|
|
|
|
2008-09-05 02:09:45 +07:00
|
|
|
this_cpu = cpu_devs[i];
|
|
|
|
c->x86_vendor = this_cpu->c_x86_vendor;
|
|
|
|
return;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
}
|
2008-09-05 02:09:45 +07:00
|
|
|
|
2016-02-02 10:45:02 +07:00
|
|
|
pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
|
|
|
|
"CPU: Your system may be unstable.\n", v);
|
2008-09-05 02:09:45 +07:00
|
|
|
|
2006-02-05 14:28:03 +07:00
|
|
|
c->x86_vendor = X86_VENDOR_UNKNOWN;
|
|
|
|
this_cpu = &default_cpu;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
void cpu_detect(struct cpuinfo_x86 *c)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
/* Get vendor name */
|
2008-02-01 23:49:43 +07:00
|
|
|
cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
|
|
|
|
(unsigned int *)&c->x86_vendor_id[0],
|
|
|
|
(unsigned int *)&c->x86_vendor_id[8],
|
|
|
|
(unsigned int *)&c->x86_vendor_id[4]);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
c->x86 = 4;
|
2008-09-05 02:09:44 +07:00
|
|
|
/* Intel-defined flags: level 0x00000001 */
|
2005-04-17 05:20:36 +07:00
|
|
|
if (c->cpuid_level >= 0x00000001) {
|
|
|
|
u32 junk, tfms, cap0, misc;
|
2009-03-14 14:46:17 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
|
2015-11-23 17:12:21 +07:00
|
|
|
c->x86 = x86_family(tfms);
|
|
|
|
c->x86_model = x86_model(tfms);
|
2018-01-01 08:52:10 +07:00
|
|
|
c->x86_stepping = x86_stepping(tfms);
|
2009-03-14 14:46:17 +07:00
|
|
|
|
2008-02-01 04:05:45 +07:00
|
|
|
if (cap0 & (1<<19)) {
|
|
|
|
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
|
2008-09-05 02:09:44 +07:00
|
|
|
c->x86_cache_alignment = c->x86_clflush_size;
|
2008-02-01 04:05:45 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
}
|
2008-09-05 02:09:44 +07:00
|
|
|
|
2017-01-19 02:15:38 +07:00
|
|
|
static void apply_forced_caps(struct cpuinfo_x86 *c)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2017-12-04 21:07:32 +07:00
|
|
|
for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
|
2017-01-19 02:15:38 +07:00
|
|
|
c->x86_capability[i] &= ~cpu_caps_cleared[i];
|
|
|
|
c->x86_capability[i] |= cpu_caps_set[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-30 21:30:23 +07:00
|
|
|
static void init_speculation_control(struct cpuinfo_x86 *c)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
|
|
|
|
* and they also have a different bit for STIBP support. Also,
|
|
|
|
* a hypervisor might have set the individual AMD bits even on
|
|
|
|
* Intel CPUs, for finer-grained selection of what's available.
|
|
|
|
*/
|
|
|
|
if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
|
|
|
|
set_cpu_cap(c, X86_FEATURE_IBRS);
|
|
|
|
set_cpu_cap(c, X86_FEATURE_IBPB);
|
2018-05-11 00:13:18 +07:00
|
|
|
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
|
2018-01-30 21:30:23 +07:00
|
|
|
}
|
2018-05-02 23:15:14 +07:00
|
|
|
|
2018-01-30 21:30:23 +07:00
|
|
|
if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
|
|
|
|
set_cpu_cap(c, X86_FEATURE_STIBP);
|
2018-05-02 23:15:14 +07:00
|
|
|
|
2018-05-11 03:06:39 +07:00
|
|
|
if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
|
|
|
|
cpu_has(c, X86_FEATURE_VIRT_SSBD))
|
2018-05-11 01:21:36 +07:00
|
|
|
set_cpu_cap(c, X86_FEATURE_SSBD);
|
|
|
|
|
2018-05-11 00:13:18 +07:00
|
|
|
if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
|
2018-05-02 23:15:14 +07:00
|
|
|
set_cpu_cap(c, X86_FEATURE_IBRS);
|
2018-05-11 00:13:18 +07:00
|
|
|
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
|
|
|
|
}
|
2018-05-02 23:15:14 +07:00
|
|
|
|
|
|
|
if (cpu_has(c, X86_FEATURE_AMD_IBPB))
|
|
|
|
set_cpu_cap(c, X86_FEATURE_IBPB);
|
|
|
|
|
2018-05-11 00:13:18 +07:00
|
|
|
if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
|
2018-05-02 23:15:14 +07:00
|
|
|
set_cpu_cap(c, X86_FEATURE_STIBP);
|
2018-05-11 00:13:18 +07:00
|
|
|
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
|
|
|
|
}
|
2018-06-01 21:59:20 +07:00
|
|
|
|
|
|
|
if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
|
|
|
|
set_cpu_cap(c, X86_FEATURE_SSBD);
|
|
|
|
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
|
|
|
|
clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
|
|
|
|
}
|
2018-01-30 21:30:23 +07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
void get_cpu_cap(struct cpuinfo_x86 *c)
|
2008-01-30 19:33:32 +07:00
|
|
|
{
|
2015-12-07 16:39:40 +07:00
|
|
|
u32 eax, ebx, ecx, edx;
|
2008-01-30 19:33:32 +07:00
|
|
|
|
2008-09-05 02:09:44 +07:00
|
|
|
/* Intel-defined flags: level 0x00000001 */
|
|
|
|
if (c->cpuid_level >= 0x00000001) {
|
2015-12-07 16:39:40 +07:00
|
|
|
cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
|
2009-03-14 14:46:17 +07:00
|
|
|
|
2015-12-07 16:39:40 +07:00
|
|
|
c->x86_capability[CPUID_1_ECX] = ecx;
|
|
|
|
c->x86_capability[CPUID_1_EDX] = edx;
|
2008-09-05 02:09:44 +07:00
|
|
|
}
|
2008-01-30 19:33:32 +07:00
|
|
|
|
2016-12-16 01:14:42 +07:00
|
|
|
/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
|
|
|
|
if (c->cpuid_level >= 0x00000006)
|
|
|
|
c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
|
|
|
|
|
2010-07-08 07:29:18 +07:00
|
|
|
/* Additional Intel-defined flags: level 0x00000007 */
|
|
|
|
if (c->cpuid_level >= 0x00000007) {
|
|
|
|
cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
|
2015-12-07 16:39:40 +07:00
|
|
|
c->x86_capability[CPUID_7_0_EBX] = ebx;
|
x86/cpufeature, x86/mm/pkeys: Add protection keys related CPUID definitions
There are two CPUID bits for protection keys. One is for whether
the CPU contains the feature, and the other will appear set once
the OS enables protection keys. Specifically:
Bit 04: OSPKE. If 1, OS has set CR4.PKE to enable
Protection keys (and the RDPKRU/WRPKRU instructions)
This is because userspace can not see CR4 contents, but it can
see CPUID contents.
X86_FEATURE_PKU is referred to as "PKU" in the hardware documentation:
CPUID.(EAX=07H,ECX=0H):ECX.PKU [bit 3]
X86_FEATURE_OSPKE is "OSPKU":
CPUID.(EAX=07H,ECX=0H):ECX.OSPKE [bit 4]
These are the first CPU features which need to look at the
ECX word in CPUID leaf 0x7, so this patch also includes
fetching that word in to the cpuinfo->x86_capability[] array.
Add it to the disabled-features mask when its config option is
off. Even though we are not using it here, we also extend the
REQUIRED_MASK_BIT_SET() macro to keep it mirroring the
DISABLED_MASK_BIT_SET() version.
This means that in almost all code, you should use:
cpu_has(c, X86_FEATURE_PKU)
and *not* the CONFIG option.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210201.7714C250@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-02-13 04:02:01 +07:00
|
|
|
c->x86_capability[CPUID_7_ECX] = ecx;
|
2018-01-25 23:14:09 +07:00
|
|
|
c->x86_capability[CPUID_7_EDX] = edx;
|
2010-07-08 07:29:18 +07:00
|
|
|
}
|
|
|
|
|
2014-05-30 01:12:30 +07:00
|
|
|
/* Extended state features: level 0x0000000d */
|
|
|
|
if (c->cpuid_level >= 0x0000000d) {
|
|
|
|
cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
|
|
|
|
|
2015-12-07 16:39:40 +07:00
|
|
|
c->x86_capability[CPUID_D_1_EAX] = eax;
|
2014-05-30 01:12:30 +07:00
|
|
|
}
|
|
|
|
|
2015-01-24 01:45:43 +07:00
|
|
|
/* Additional Intel-defined flags: level 0x0000000F */
|
|
|
|
if (c->cpuid_level >= 0x0000000F) {
|
|
|
|
|
|
|
|
/* QoS sub-leaf, EAX=0Fh, ECX=0 */
|
|
|
|
cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
|
2015-12-07 16:39:40 +07:00
|
|
|
c->x86_capability[CPUID_F_0_EDX] = edx;
|
|
|
|
|
2015-01-24 01:45:43 +07:00
|
|
|
if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
|
|
|
|
/* will be overridden if occupancy monitoring exists */
|
|
|
|
c->x86_cache_max_rmid = ebx;
|
|
|
|
|
|
|
|
/* QoS sub-leaf, EAX=0Fh, ECX=1 */
|
|
|
|
cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
|
2015-12-07 16:39:40 +07:00
|
|
|
c->x86_capability[CPUID_F_1_EDX] = edx;
|
|
|
|
|
2016-03-11 06:32:09 +07:00
|
|
|
if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
|
|
|
|
((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
|
|
|
|
(cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
|
2015-01-24 01:45:43 +07:00
|
|
|
c->x86_cache_max_rmid = ecx;
|
|
|
|
c->x86_cache_occ_scale = ebx;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
c->x86_cache_max_rmid = -1;
|
|
|
|
c->x86_cache_occ_scale = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-09-05 02:09:44 +07:00
|
|
|
/* AMD-defined flags: level 0x80000001 */
|
2015-12-07 16:39:40 +07:00
|
|
|
eax = cpuid_eax(0x80000000);
|
|
|
|
c->extended_cpuid_level = eax;
|
|
|
|
|
|
|
|
if ((eax & 0xffff0000) == 0x80000000) {
|
|
|
|
if (eax >= 0x80000001) {
|
|
|
|
cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
|
2009-03-14 14:46:17 +07:00
|
|
|
|
2015-12-07 16:39:40 +07:00
|
|
|
c->x86_capability[CPUID_8000_0001_ECX] = ecx;
|
|
|
|
c->x86_capability[CPUID_8000_0001_EDX] = edx;
|
2008-01-30 19:33:32 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-11 19:58:26 +07:00
|
|
|
if (c->extended_cpuid_level >= 0x80000007) {
|
|
|
|
cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
|
|
|
|
|
|
|
|
c->x86_capability[CPUID_8000_0007_EBX] = ebx;
|
|
|
|
c->x86_power = edx;
|
|
|
|
}
|
|
|
|
|
2018-05-01 02:47:46 +07:00
|
|
|
if (c->extended_cpuid_level >= 0x80000008) {
|
|
|
|
cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
|
|
|
|
c->x86_capability[CPUID_8000_0008_EBX] = ebx;
|
|
|
|
}
|
|
|
|
|
2015-12-07 16:39:39 +07:00
|
|
|
if (c->extended_cpuid_level >= 0x8000000a)
|
2015-12-07 16:39:40 +07:00
|
|
|
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
|
2008-01-30 19:33:32 +07:00
|
|
|
|
2010-05-20 02:01:23 +07:00
|
|
|
init_scattered_cpuid_features(c);
|
2018-01-30 21:30:23 +07:00
|
|
|
init_speculation_control(c);
|
2017-01-19 02:15:39 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear/Set all flags overridden by options, after probe.
|
|
|
|
* This needs to happen each time we re-probe, which may happen
|
|
|
|
* several times during CPU initialization.
|
|
|
|
*/
|
|
|
|
apply_forced_caps(c);
|
2008-01-30 19:33:32 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2018-07-24 19:45:47 +07:00
|
|
|
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
|
2018-04-10 16:27:04 +07:00
|
|
|
{
|
|
|
|
u32 eax, ebx, ecx, edx;
|
|
|
|
|
|
|
|
if (c->extended_cpuid_level >= 0x80000008) {
|
|
|
|
cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
|
|
|
|
|
|
|
|
c->x86_virt_bits = (eax >> 8) & 0xff;
|
|
|
|
c->x86_phys_bits = eax & 0xff;
|
|
|
|
}
|
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
|
|
|
|
c->x86_phys_bits = 36;
|
|
|
|
#endif
|
2018-08-25 00:03:50 +07:00
|
|
|
c->x86_cache_bits = c->x86_phys_bits;
|
2018-04-10 16:27:04 +07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * Vendor identification fallback for CPUs without the CPUID instruction
 * (32-bit only): distinguish 386 vs 486 via the EFLAGS.AC bit, then let
 * each registered vendor's c_identify() hook attempt identification.
 */
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	c->x86 = flag_is_changeable_p(X86_EFLAGS_AC) ? 4 : 3;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i] || !cpu_devs[i]->c_identify)
			continue;

		/* A hook that recognizes the CPU fills in the vendor id. */
		c->x86_vendor_id[0] = 0;
		cpu_devs[i]->c_identify(c);

		if (c->x86_vendor_id[0]) {
			get_cpu_vendor(c);
			break;
		}
	}
#endif
}
|
|
|
|
|
2018-02-03 04:39:23 +07:00
|
|
|
static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
|
2018-08-08 00:17:27 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
|
2018-01-25 23:14:13 +07:00
|
|
|
{ X86_VENDOR_CENTAUR, 5 },
|
|
|
|
{ X86_VENDOR_INTEL, 5 },
|
|
|
|
{ X86_VENDOR_NSC, 5 },
|
|
|
|
{ X86_VENDOR_ANY, 4 },
|
|
|
|
{}
|
|
|
|
};
|
|
|
|
|
2018-02-03 04:39:23 +07:00
|
|
|
static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
|
2018-01-25 23:14:13 +07:00
|
|
|
{ X86_VENDOR_AMD },
|
2018-09-23 16:35:50 +07:00
|
|
|
{ X86_VENDOR_HYGON },
|
2018-01-25 23:14:13 +07:00
|
|
|
{}
|
|
|
|
};
|
|
|
|
|
2018-05-22 16:05:39 +07:00
|
|
|
/* Only list CPUs which speculate but are non susceptible to SSB */
|
2018-04-26 09:04:20 +07:00
|
|
|
static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
|
2018-08-08 00:17:27 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
|
2018-04-26 09:04:20 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
|
2018-08-08 00:17:27 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
|
2018-04-26 09:04:20 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
|
2018-04-26 09:04:24 +07:00
|
|
|
{ X86_VENDOR_AMD, 0x12, },
|
|
|
|
{ X86_VENDOR_AMD, 0x11, },
|
|
|
|
{ X86_VENDOR_AMD, 0x10, },
|
|
|
|
{ X86_VENDOR_AMD, 0xf, },
|
2018-04-26 09:04:20 +07:00
|
|
|
{}
|
|
|
|
};
|
|
|
|
|
2018-06-14 05:48:26 +07:00
|
|
|
static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
|
|
|
|
/* in addition to cpu_no_speculation */
|
2018-08-08 00:17:27 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
|
2018-06-14 05:48:26 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
|
2018-08-08 00:17:27 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
|
2018-06-14 05:48:26 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
|
2018-08-08 00:17:27 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
|
2018-06-14 05:48:26 +07:00
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
|
|
|
|
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
|
|
|
|
{}
|
|
|
|
};
|
|
|
|
|
2018-04-26 09:04:16 +07:00
|
|
|
/*
 * Set the speculation-related X86_BUG_* bits on the boot CPU based on
 * vendor/family/model tables and the IA32_ARCH_CAPABILITIES MSR.
 *
 * Order matters: each early return leaves all of the *later* bug bits
 * unset, so a CPU matched by cpu_no_speculation gets none of them,
 * while e.g. a CPU matched by cpu_no_meltdown still gets the Spectre
 * and (possibly) SSB bits set above that point.
 */
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	/* CPUs that do not speculate at all: no speculation bugs apply. */
	if (x86_match_cpu(cpu_no_speculation))
		return;

	/* Every speculating CPU is assumed vulnerable to Spectre v1/v2. */
	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	/* ia32_cap stays 0 when the MSR is not enumerated. */
	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	/*
	 * SSB applies unless the CPU is in the exclusion table, or the
	 * hardware reports immunity via ARCH_CAP_SSB_NO (Intel MSR bit)
	 * or the AMD_SSB_NO CPUID feature.
	 */
	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
	   !(ia32_cap & ARCH_CAP_SSB_NO) &&
	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	/* Hardware with always-on IBRS: record it as enhanced IBRS. */
	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (x86_match_cpu(cpu_no_meltdown))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	/* L1TF is only checked on CPUs that got this far (Meltdown-affected). */
	if (x86_match_cpu(cpu_no_l1tf))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}
|
|
|
|
|
2018-07-20 03:55:28 +07:00
|
|
|
/*
|
|
|
|
* The NOPL instruction is supposed to exist on all CPUs of family >= 6;
|
|
|
|
* unfortunately, that's not true in practice because of early VIA
|
|
|
|
* chips and (more importantly) broken virtualizers that are not easy
|
|
|
|
* to detect. In the latter case it doesn't even *fail* reliably, so
|
|
|
|
* probing for it doesn't even work. Disable it completely on 32-bit
|
|
|
|
* unless we can find a reliable way to detect all the broken cases.
|
|
|
|
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
|
|
|
|
*/
|
2018-07-20 03:55:29 +07:00
|
|
|
static void detect_nopl(void)
|
2018-07-20 03:55:28 +07:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_X86_32
|
2018-07-20 03:55:29 +07:00
|
|
|
setup_clear_cpu_cap(X86_FEATURE_NOPL);
|
2018-07-20 03:55:28 +07:00
|
|
|
#else
|
2018-07-20 03:55:29 +07:00
|
|
|
setup_force_cpu_cap(X86_FEATURE_NOPL);
|
2018-07-20 03:55:28 +07:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2008-02-24 17:58:13 +07:00
|
|
|
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	/* Conservative architectural defaults; refined by get_cpu_address_sizes(). */
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	/* May enable CPUID on CPUs where it is disabled (e.g. via c_identify). */
	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);

		/* Vendor-specific early hook, if the cpu_dev provides one. */
		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		/* Boot-CPU-only vendor hook. */
		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	/* Record speculation/Meltdown/L1TF vulnerability bits for this CPU. */
	cpu_set_bug_bits(c);

	fpu__init_system(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false-positives at the later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging is disabled compile-time;
	 *  - it's 32-bit kernel;
	 *  - machine doesn't support 5-level paging;
	 *  - user specified 'no5lvl' in kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}
|
|
|
|
|
2008-09-05 02:09:44 +07:00
|
|
|
/*
 * Populate cpu_devs[] from the linker-placed table of vendor cpu_dev
 * descriptors (__x86_cpu_dev_start .. __x86_cpu_dev_end), then run
 * early identification on the boot CPU.
 */
void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		/* cpu_devs[] has room for at most X86_VENDOR_NUM entries. */
		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			/* Each cpu_dev carries up to two ident strings. */
			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info(" %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}
|
2008-01-30 19:33:32 +07:00
|
|
|
|
2016-04-08 07:31:46 +07:00
|
|
|
/*
 * Probe whether loading a NULL selector into %fs clears the FS base
 * (Intel behavior) or leaves it intact (AMD behavior); sets
 * X86_BUG_NULL_SEG in the latter case.  64-bit only; a no-op on 32-bit.
 */
static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hardcoding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

	unsigned long old_base, tmp;
	/* Save FS base, plant a non-zero value, then load a NULL selector. */
	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);
	loadsegment(fs, 0);
	rdmsrl(MSR_FS_BASE, tmp);
	/* If the planted value survived, the NULL load did not clear the base. */
	if (tmp != 0)
		set_cpu_bug(c, X86_BUG_NULL_SEG);
	/* Restore the original FS base regardless of the outcome. */
	wrmsrl(MSR_FS_BASE, old_base);
#endif
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * Generic (vendor-independent) CPU identification: CPUID probing,
 * vendor/capability/address-size detection, initial APIC id, model
 * name, and the NULL-seg / ESPFIX quirk checks.  Runs on every CPU
 * (called from identify_cpu()).
 */
static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	/* May enable CPUID on CPUs where it is initially disabled. */
	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	if (c->cpuid_level >= 0x00000001) {
		/* Initial APIC id lives in bits 31:24 of CPUID(1).EBX. */
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_null_seg_behavior(c);

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_PARAVIRT_XXL
	do {
		extern void native_iret(void);
		/* Only flag ESPFIX when the native IRET path is in use. */
		if (pv_ops.cpu.iret == native_iret)
			set_cpu_bug(c, X86_BUG_ESPFIX);
	} while (0);
# else
	set_cpu_bug(c, X86_BUG_ESPFIX);
# endif
#endif
}
|
|
|
|
|
2015-01-24 01:45:43 +07:00
|
|
|
static void x86_init_cache_qos(struct cpuinfo_x86 *c)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The heavy lifting of max_rmid and cache_occ_scale are handled
|
|
|
|
* in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu
|
|
|
|
* in case CQM bits really aren't there in this CPU.
|
|
|
|
*/
|
|
|
|
if (c != &boot_cpu_data) {
|
|
|
|
boot_cpu_data.x86_cache_max_rmid =
|
|
|
|
min(boot_cpu_data.x86_cache_max_rmid,
|
|
|
|
c->x86_cache_max_rmid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-09 22:35:51 +07:00
|
|
|
/*
|
2016-12-12 17:04:53 +07:00
|
|
|
* Validate that ACPI/mptables have the same information about the
|
|
|
|
* effective APIC id and update the package map.
|
2016-11-09 22:35:51 +07:00
|
|
|
*/
|
2016-12-12 17:04:53 +07:00
|
|
|
static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
|
2016-11-09 22:35:51 +07:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_SMP
|
2016-12-12 17:04:53 +07:00
|
|
|
unsigned int apicid, cpu = smp_processor_id();
|
2016-11-09 22:35:51 +07:00
|
|
|
|
|
|
|
apicid = apic->cpu_present_to_apicid(cpu);
|
|
|
|
|
2016-12-12 17:04:53 +07:00
|
|
|
if (apicid != c->apicid) {
|
|
|
|
pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
|
2016-11-09 22:35:51 +07:00
|
|
|
cpu, apicid, c->initial_apicid);
|
|
|
|
}
|
2016-12-12 17:04:53 +07:00
|
|
|
BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
|
2016-11-09 22:35:51 +07:00
|
|
|
#else
|
|
|
|
c->logical_proc_id = 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * This does the hard work of actually picking apart the CPU stuff...
 *
 * Runs on every CPU (boot and secondary).  The sequence is:
 * reset per-CPU defaults -> generic_identify() -> vendor c_identify/
 * c_init hooks -> feature filtering/forcing -> model-name fallback ->
 * misc per-feature setup -> accumulate common caps into boot_cpu_data.
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	/* Reset everything identification may fill in, to known defaults. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = 0;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->cu_id = 0xff;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	/* Vendor-specific identification hook, if any. */
	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	apply_forced_caps(c);

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP/UMIP */
	setup_smep(c);
	setup_smap(c);
	setup_umip(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	x86_init_rdrand(c);
	x86_init_cache_qos(c);
	setup_pku(c);

	/*
	 * Clear/Set all flags overridden by options, need do it
	 * before following smp all cpus cap AND.
	 */
	apply_forced_caps(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}
|
2005-11-07 15:58:42 +07:00
|
|
|
|
2015-03-16 16:32:20 +07:00
|
|
|
/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	/* Nothing to do on CPUs without the SEP (SYSENTER) feature. */
	if (!boot_cpu_has(X86_FEATURE_SEP))
		return;

	/* Pin to the current CPU while programming its per-CPU MSRs. */
	cpu = get_cpu();
	tss = &per_cpu(cpu_tss_rw, cpu);

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
	/* SYSENTER stack: just past the end of this CPU's entry stack. */
	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

	put_cpu();
}
#endif
|
|
|
|
|
2007-05-03 00:27:12 +07:00
|
|
|
/*
 * Full identification of the boot CPU, plus 32-bit SYSENTER setup and
 * TLB size detection.  Boot-CPU counterpart of identify_secondary_cpu().
 */
void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}
|
2005-07-08 07:56:38 +07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
void identify_secondary_cpu(struct cpuinfo_x86 *c)
|
2007-05-03 00:27:12 +07:00
|
|
|
{
|
|
|
|
BUG_ON(c == &boot_cpu_data);
|
|
|
|
identify_cpu(c);
|
2008-09-05 10:09:13 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
2007-05-03 00:27:12 +07:00
|
|
|
enable_sep_cpu();
|
2008-09-05 10:09:13 +07:00
|
|
|
#endif
|
2007-05-03 00:27:12 +07:00
|
|
|
mtrr_ap_init();
|
2016-12-12 17:04:53 +07:00
|
|
|
validate_apic_and_package_id(c);
|
2018-04-26 09:04:22 +07:00
|
|
|
x86_spec_ctrl_setup_ap();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2008-01-30 19:33:21 +07:00
|
|
|
static __init int setup_noclflush(char *arg)
|
|
|
|
{
|
2014-02-27 23:31:30 +07:00
|
|
|
setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
|
2014-02-27 23:36:31 +07:00
|
|
|
setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
|
2008-01-30 19:33:21 +07:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
__setup("noclflush", setup_noclflush);
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
void print_cpu_info(struct cpuinfo_x86 *c)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2009-03-12 19:08:49 +07:00
|
|
|
const char *vendor = NULL;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2009-03-14 14:46:17 +07:00
|
|
|
if (c->x86_vendor < X86_VENDOR_NUM) {
|
2005-04-17 05:20:36 +07:00
|
|
|
vendor = this_cpu->c_vendor;
|
2009-03-14 14:46:17 +07:00
|
|
|
} else {
|
|
|
|
if (c->cpuid_level >= 0)
|
|
|
|
vendor = c->x86_vendor_id;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-09-20 08:41:16 +07:00
|
|
|
if (vendor && !strstr(c->x86_model_id, vendor))
|
2016-02-02 10:45:02 +07:00
|
|
|
pr_cont("%s ", vendor);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-09-05 02:09:44 +07:00
|
|
|
if (c->x86_model_id[0])
|
2016-02-02 10:45:02 +07:00
|
|
|
pr_cont("%s", c->x86_model_id);
|
2005-04-17 05:20:36 +07:00
|
|
|
else
|
2016-02-02 10:45:02 +07:00
|
|
|
pr_cont("%d86", c->x86);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2016-02-02 10:45:02 +07:00
|
|
|
pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
|
2012-09-14 23:37:46 +07:00
|
|
|
|
2018-01-01 08:52:10 +07:00
|
|
|
if (c->x86_stepping || c->cpuid_level >= 0)
|
|
|
|
pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
|
2005-04-17 05:20:36 +07:00
|
|
|
else
|
2016-02-02 10:45:02 +07:00
|
|
|
pr_cont(")\n");
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2017-10-14 04:56:43 +07:00
|
|
|
/*
|
|
|
|
* clearcpuid= was already parsed in fpu__init_parse_early_param.
|
|
|
|
* But we need to keep a dummy __setup around otherwise it would
|
|
|
|
* show up as an environment variable for init.
|
|
|
|
*/
|
|
|
|
static __init int setup_clearcpuid(char *arg)
|
2008-01-30 19:33:21 +07:00
|
|
|
{
|
|
|
|
return 1;
|
|
|
|
}
|
2017-10-14 04:56:43 +07:00
|
|
|
__setup("clearcpuid=", setup_clearcpuid);
|
2008-01-30 19:33:21 +07:00
|
|
|
|
2008-09-05 10:09:03 +07:00
|
|
|
#ifdef CONFIG_X86_64

/* Per-CPU IRQ stack: first object in the per-CPU area, page aligned. */
DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);

/*
 * The following percpu variables are hot. Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

/* Points one past the top of this CPU's IRQ stack. */
DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;

/* NOTE(review): initialized to -1; presumably "not on the IRQ stack" -- confirm. */
DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
|
|
|
|
|
2008-09-05 10:09:03 +07:00
|
|
|
/* May not be marked __init: used by software suspend */
|
|
|
|
void syscall_init(void)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
x86/cpu: Fix MSR value truncation issue
So sparse rightfully complains that the u64 MSR value we're
writing into the STAR MSR, i.e. 0xc0000081, is being truncated:
./arch/x86/include/asm/msr.h:193:36: warning: cast truncates
bits from constant value (23001000000000 becomes 0)
because the actual value doesn't fit into the unsigned 32-bit
quantity which are the @low and @high wrmsrl() parameters.
This is not a problem, practically, because gcc is actually
being smart enough here and does the right thing:
.loc 3 87 0
xorl %esi, %esi # we needz a 32-bit zero
movl $2293776, %edx # 0x00230010 == (__USER32_CS << 16) | __KERNEL_CS go into the high bits
movl $-1073741695, %ecx # MSR_STAR, i.e., 0xc0000081
movl %esi, %eax # low order 32 bits in the MSR which are 0
#APP
# 87 "./arch/x86/include/asm/msr.h" 1
wrmsr
More specifically, MSR_STAR[31:0] is being set to 0. That field
is reserved on Intel and on AMD it is 32-bit SYSCALL Target EIP.
I'd strongly guess because Intel doesn't have SYSCALL in
compat/legacy mode and we're using SYSENTER and INT80 there. And
for compat syscalls in long mode we use CSTAR.
So let's fix the sparse warning by writing SYSRET and SYSCALL CS
and SS into the high 32-bit half of STAR and 0 in the low half
explicitly.
[ Actually, if we had to be precise, we would have to read what's in
STAR[31:0] and write it back unchanged on Intel and write 0 on AMD. I
guess the current writing to 0 is still ok since Intel can apparently
stomach it. ]
The resulting code is identical to what we have above:
.loc 3 87 0
xorl %esi, %esi # tmp104
movl $2293776, %eax #, tmp103
movl $-1073741695, %ecx #, tmp102
movl %esi, %edx # tmp104, tmp104
...
wrmsr
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1448273546-2567-6-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-11-23 17:12:25 +07:00
|
|
|
wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
|
x86/pti/64: Remove the SYSCALL64 entry trampoline
The SYSCALL64 trampoline has a couple of nice properties:
- The usual sequence of SWAPGS followed by two GS-relative accesses to
set up RSP is somewhat slow because the GS-relative accesses need
to wait for SWAPGS to finish. The trampoline approach allows
RIP-relative accesses to set up RSP, which avoids the stall.
- The trampoline avoids any percpu access before CR3 is set up,
which means that no percpu memory needs to be mapped in the user
page tables. This prevents using Meltdown to read any percpu memory
outside the cpu_entry_area and prevents using timing leaks
to directly locate the percpu areas.
The downsides of using a trampoline may outweigh the upsides, however.
It adds an extra non-contiguous I$ cache line to system calls, and it
forces an indirect jump to transfer control back to the normal kernel
text after CR3 is set up. The latter is because x86 lacks a 64-bit
direct jump instruction that could jump from the trampoline to the entry
text. With retpolines enabled, the indirect jump is extremely slow.
Change the code to map the percpu TSS into the user page tables to allow
the non-trampoline SYSCALL64 path to work under PTI. This does not add a
new direct information leak, since the TSS is readable by Meltdown from the
cpu_entry_area alias regardless. It does allow a timing attack to locate
the percpu area, but KASLR is more or less a lost cause against local
attack on CPUs vulnerable to Meltdown regardless. As far as I'm concerned,
on current hardware, KASLR is only useful to mitigate remote attacks that
try to attack the kernel without first gaining RCE against a vulnerable
user process.
On Skylake, with CONFIG_RETPOLINE=y and KPTI on, this reduces syscall
overhead from ~237ns to ~228ns.
There is a possible alternative approach: Move the trampoline within 2G of
the entry text and make a separate copy for each CPU. This would allow a
direct jump to rejoin the normal entry path. There are pro's and con's for
this approach:
+ It avoids a pipeline stall
- It executes from an extra page and read from another extra page during
the syscall. The latter is because it needs to use a relative
addressing mode to find sp1 -- it's the same *cacheline*, but accessed
using an alias, so it's an extra TLB entry.
- Slightly more memory. This would be one page per CPU for a simple
implementation and 64-ish bytes per CPU or one page per node for a more
complex implementation.
- More code complexity.
The current approach is chosen for simplicity and because the alternative
does not provide a significant benefit, which makes it worth.
[ tglx: Added the alternative discussion to the changelog ]
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/8c7c6e483612c3e4e10ca89495dc160b1aa66878.1536015544.git.luto@kernel.org
2018-09-04 05:59:44 +07:00
|
|
|
wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
|
2015-03-24 20:41:37 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_IA32_EMULATION
|
2015-07-24 02:14:40 +07:00
|
|
|
wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
|
2015-03-23 02:48:14 +07:00
|
|
|
/*
|
2015-03-27 17:59:16 +07:00
|
|
|
* This only works on Intel CPUs.
|
|
|
|
* On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
|
|
|
|
* This does not cause SYSENTER to jump to the wrong location, because
|
|
|
|
* AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
|
2015-03-23 02:48:14 +07:00
|
|
|
*/
|
|
|
|
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
|
2018-09-13 09:49:45 +07:00
|
|
|
wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
|
|
|
|
(unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
|
2015-06-08 13:33:56 +07:00
|
|
|
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
|
2015-03-24 20:41:37 +07:00
|
|
|
#else
|
2015-07-24 02:14:40 +07:00
|
|
|
wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
|
2015-04-03 19:25:28 +07:00
|
|
|
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
|
2015-03-24 20:41:37 +07:00
|
|
|
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
|
|
|
|
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
|
2008-09-05 10:09:03 +07:00
|
|
|
#endif
|
x86: use ELF section to list CPU vendor specific code
Replace the hardcoded list of initialization functions for each CPU
vendor by a list in an ELF section, which is read at initialization in
arch/x86/kernel/cpu/cpu.c to fill the cpu_devs[] array. The ELF
section, named .x86cpuvendor.init, is reclaimed after boot, and
contains entries of type "struct cpu_vendor_dev" which associates a
vendor number with a pointer to a "struct cpu_dev" structure.
This first modification allows to remove all the VENDOR_init_cpu()
functions.
This patch also removes the hardcoded calls to early_init_amd() and
early_init_intel(). Instead, we add a "c_early_init" member to the
cpu_dev structure, which is then called if not NULL by the generic CPU
initialization code. Unfortunately, in early_cpu_detect(), this_cpu is
not yet set, so we have to use the cpu_devs[] array directly.
This patch is part of the Linux Tiny project, and is needed for
further patch that will allow to disable compilation of unused CPU
support code.
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-02-15 18:00:23 +07:00
|
|
|
|
2008-09-05 10:09:03 +07:00
|
|
|
/* Flags to clear on syscall */
|
|
|
|
wrmsrl(MSR_SYSCALL_MASK,
|
2012-09-22 02:43:12 +07:00
|
|
|
X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
|
x86_64, entry: Filter RFLAGS.NT on entry from userspace
The NT flag doesn't do anything in long mode other than causing IRET
to #GP. Oddly, CPL3 code can still set NT using popf.
Entry via hardware or software interrupt clears NT automatically, so
the only relevant entries are fast syscalls.
If user code causes kernel code to run with NT set, then there's at
least some (small) chance that it could cause trouble. For example,
user code could cause a call to EFI code with NT set, and who knows
what would happen? Apparently some games on Wine sometimes do
this (!), and, if an IRET return happens, they will segfault. That
segfault cannot be handled, because signal delivery fails, too.
This patch programs the CPU to clear NT on entry via SYSCALL (both
32-bit and 64-bit, by my reading of the AMD APM), and it clears NT
in software on entry via SYSENTER.
To save a few cycles, this borrows a trick from Jan Beulich in Xen:
it checks whether NT is set before trying to clear it. As a result,
it seems to have very little effect on SYSENTER performance on my
machine.
There's another minor bug fix in here: it looks like the CFI
annotations were wrong if CONFIG_AUDITSYSCALL=n.
Testers beware: on Xen, SYSENTER with NT set turns into a GPF.
I haven't touched anything on 32-bit kernels.
The syscall mask change comes from a variant of this patch by Anish
Bhatt.
Note to stable maintainers: there is no known security issue here.
A misguided program can set NT and cause the kernel to try and fail
to deliver SIGSEGV, crashing the program. This patch fixes Far Cry
on Wine: https://bugs.winehq.org/show_bug.cgi?id=33275
Cc: <stable@vger.kernel.org>
Reported-by: Anish Bhatt <anish@chelsio.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/395749a5d39a29bd3e4b35899cf3a3c1340e5595.1412189265.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2014-10-02 01:49:04 +07:00
|
|
|
X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2006-12-07 08:14:02 +07:00
|
|
|
|
2008-09-05 10:09:03 +07:00
|
|
|
/*
|
|
|
|
* Copies of the original ist values from the tss are only accessed during
|
|
|
|
* debugging, no special alignment required.
|
|
|
|
*/
|
|
|
|
DEFINE_PER_CPU(struct orig_ist, orig_ist);
|
|
|
|
|
2011-12-09 15:02:19 +07:00
|
|
|
static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
|
2011-12-16 23:43:02 +07:00
|
|
|
DEFINE_PER_CPU(int, debug_stack_usage);
|
2011-12-09 15:02:19 +07:00
|
|
|
|
|
|
|
int is_debug_stack(unsigned long addr)
|
|
|
|
{
|
x86: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-18 00:30:40 +07:00
|
|
|
return __this_cpu_read(debug_stack_usage) ||
|
|
|
|
(addr <= __this_cpu_read(debug_stack_addr) &&
|
|
|
|
addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
|
2011-12-09 15:02:19 +07:00
|
|
|
}
|
2014-04-17 15:17:12 +07:00
|
|
|
NOKPROBE_SYMBOL(is_debug_stack);
|
2011-12-09 15:02:19 +07:00
|
|
|
|
2013-06-20 22:45:44 +07:00
|
|
|
DEFINE_PER_CPU(u32, debug_idt_ctr);
|
2012-05-30 22:47:00 +07:00
|
|
|
|
2011-12-09 15:02:19 +07:00
|
|
|
void debug_stack_set_zero(void)
|
|
|
|
{
|
2013-06-20 22:45:44 +07:00
|
|
|
this_cpu_inc(debug_idt_ctr);
|
|
|
|
load_current_idt();
|
2011-12-09 15:02:19 +07:00
|
|
|
}
|
2014-04-17 15:17:12 +07:00
|
|
|
NOKPROBE_SYMBOL(debug_stack_set_zero);
|
2011-12-09 15:02:19 +07:00
|
|
|
|
|
|
|
void debug_stack_reset(void)
|
|
|
|
{
|
2013-06-20 22:45:44 +07:00
|
|
|
if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
|
2012-05-30 22:47:00 +07:00
|
|
|
return;
|
2013-06-20 22:45:44 +07:00
|
|
|
if (this_cpu_dec_return(debug_idt_ctr) == 0)
|
|
|
|
load_current_idt();
|
2011-12-09 15:02:19 +07:00
|
|
|
}
|
2014-04-17 15:17:12 +07:00
|
|
|
NOKPROBE_SYMBOL(debug_stack_reset);
|
2011-12-09 15:02:19 +07:00
|
|
|
|
2009-03-14 14:46:17 +07:00
|
|
|
#else /* CONFIG_X86_64 */
|
2008-09-05 10:09:03 +07:00
|
|
|
|
2009-08-03 12:12:19 +07:00
|
|
|
DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
|
|
|
|
EXPORT_PER_CPU_SYMBOL(current_task);
|
2013-08-14 19:51:00 +07:00
|
|
|
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
|
|
|
|
EXPORT_PER_CPU_SYMBOL(__preempt_count);
|
2009-08-03 12:12:19 +07:00
|
|
|
|
2015-03-07 08:50:19 +07:00
|
|
|
/*
|
|
|
|
* On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
|
|
|
|
* the top of the kernel stack. Use an extra percpu variable to track the
|
|
|
|
* top of the kernel stack directly.
|
|
|
|
*/
|
|
|
|
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
|
|
|
|
(unsigned long)&init_thread_union + THREAD_SIZE;
|
|
|
|
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
|
|
|
|
|
Kbuild: rename CC_STACKPROTECTOR[_STRONG] config variables
The changes to automatically test for working stack protector compiler
support in the Kconfig files removed the special STACKPROTECTOR_AUTO
option that picked the strongest stack protector that the compiler
supported.
That was all a nice cleanup - it makes no sense to have the AUTO case
now that the Kconfig phase can just determine the compiler support
directly.
HOWEVER.
It also meant that doing "make oldconfig" would now _disable_ the strong
stackprotector if you had AUTO enabled, because in a legacy config file,
the sane stack protector configuration would look like
CONFIG_HAVE_CC_STACKPROTECTOR=y
# CONFIG_CC_STACKPROTECTOR_NONE is not set
# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
# CONFIG_CC_STACKPROTECTOR_STRONG is not set
CONFIG_CC_STACKPROTECTOR_AUTO=y
and when you ran this through "make oldconfig" with the Kbuild changes,
it would ask you about the regular CONFIG_CC_STACKPROTECTOR (that had
been renamed from CONFIG_CC_STACKPROTECTOR_REGULAR to just
CONFIG_CC_STACKPROTECTOR), but it would think that the STRONG version
used to be disabled (because it was really enabled by AUTO), and would
disable it in the new config, resulting in:
CONFIG_HAVE_CC_STACKPROTECTOR=y
CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
CONFIG_CC_STACKPROTECTOR=y
# CONFIG_CC_STACKPROTECTOR_STRONG is not set
CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
That's dangerously subtle - people could suddenly find themselves with
the weaker stack protector setup without even realizing.
The solution here is to just rename not just the old REGULAR stack
protector option, but also the strong one. This does that by just
removing the CC_ prefix entirely for the user choices, because it really
is not about the compiler support (the compiler support now instead
automatically impacts _visibility_ of the options to users).
This results in "make oldconfig" actually asking the user for their
choice, so that we don't have any silent subtle security model changes.
The end result would generally look like this:
CONFIG_HAVE_CC_STACKPROTECTOR=y
CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
CONFIG_STACKPROTECTOR=y
CONFIG_STACKPROTECTOR_STRONG=y
CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
where the "CC_" versions really are about internal compiler
infrastructure, not the user selections.
Acked-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2018-06-14 10:21:18 +07:00
|
|
|
#ifdef CONFIG_STACKPROTECTOR
/* Per-CPU stack-protector canary (32-bit only; 64-bit keeps it in the PDA). */
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */
|
2007-05-03 00:27:16 +07:00
|
|
|
|
2009-03-14 12:49:49 +07:00
|
|
|
/*
 * Zero the six usable debug registers (DR0-DR3, DR6, DR7).
 */
static void clear_all_debug_regs(void)
{
	int reg;

	for (reg = 0; reg < 8; reg++) {
		/* DR4 and DR5 are skipped. */
		if (reg == 4 || reg == 5)
			continue;

		set_debugreg(0, reg);
	}
}
|
2007-05-03 00:27:16 +07:00
|
|
|
|
2010-05-21 09:04:30 +07:00
|
|
|
#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else	/* !CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif	/* !CONFIG_KGDB */
|
|
|
|
|
2014-06-20 19:23:11 +07:00
|
|
|
/*
 * Park an AP until the master CPU acknowledges it: set our bit in
 * cpu_initialized_mask (warning if it was already set) and spin until
 * the master marks us in cpu_callout_mask. No-op on !SMP.
 */
static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));

	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}
|
|
|
|
|
2018-09-19 06:08:59 +07:00
|
|
|
#ifdef CONFIG_X86_64
/*
 * Publish this CPU's number and node ID to user space: via MSR_TSC_AUX
 * (read with RDTSCP) when available, and always via the limit field of
 * the GDT_ENTRY_CPUNODE segment descriptor.
 */
static void setup_getcpu(int cpu)
{
	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
	struct desc_struct d = { };

	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux(cpudata);

	/* Store CPU and node number in limit. */
	d.limit0 = cpudata;
	d.limit1 = cpudata >> 16;

	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
}
#endif
|
|
|
|
|
2007-05-03 00:27:10 +07:00
|
|
|
/*
|
|
|
|
* cpu_init() initializes state that is per-CPU. Some data is already
|
|
|
|
* initialized (naturally) in the bootstrap process, such as the GDT
|
|
|
|
* and IDT. We reload them nevertheless, this function acts as a
|
|
|
|
* 'CPU state barrier', nothing should get across.
|
2008-09-05 10:09:04 +07:00
|
|
|
* A lot of state is already set up in PDA init for 64 bit
|
2007-05-03 00:27:10 +07:00
|
|
|
*/
|
2008-09-05 10:09:04 +07:00
|
|
|
#ifdef CONFIG_X86_64
|
2009-03-14 14:46:17 +07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * cpu_init() — 64-bit per-CPU state initialization.
 *
 * Reloads the GDT/IDT, programs the SYSCALL MSRs, wires up the IST
 * exception stacks, loads the per-CPU TSS and opens the I/O permission
 * bitmap. Runs once per CPU during bring-up; ordering of the steps
 * below is significant.
 */
void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = raw_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	/* Secondary CPUs (cpu != 0) load their microcode here. */
	if (cpu)
		load_ucode_ap();

	t = &per_cpu(cpu_tss_rw, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	/* Record this CPU's NUMA node if it is still at the default (0). */
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif
	setup_getcpu(cpu);

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	/* Clear the TLS slots and program the SYSCALL entry MSRs. */
	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;

		/*
		 * estacks is advanced past each stack first, so ist[v]
		 * holds the top (highest address) of exception stack v.
		 */
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	/* Take a reference on init_mm and make it this CPU's active mm. */
	mmgrab(&init_mm);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, me);

	/*
	 * Initialize the TSS. sp0 points to the entry trampoline stack
	 * regardless of what task is running.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	/* SGI UV platforms need extra per-CPU setup. */
	if (is_uv_system())
		uv_cpu_init();

	load_fixmap_gdt(cpu);
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
/*
 * cpu_init() — 32-bit per-CPU state initialization.
 *
 * Reloads the IDT/GDT, loads the per-CPU TSS and LDT, sets up the
 * doublefault TSS (when configured) and resets the debug registers.
 * Runs once per CPU during bring-up.
 */
void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	pr_info("Initializing CPU#%d\n", cpu);

	/* Only touch CR4 if at least one of the affected features exists. */
	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    boot_cpu_has(X86_FEATURE_TSC) ||
	    boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	mmgrab(&init_mm);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, curr);

	/*
	 * Initialize the TSS. sp0 points to the entry trampoline stack
	 * regardless of what task is running.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	load_fixmap_gdt(cpu);
}
|
2008-09-05 10:09:04 +07:00
|
|
|
#endif
|
2013-06-09 17:07:32 +07:00
|
|
|
|
2015-07-21 04:47:58 +07:00
|
|
|
static void bsp_resume(void)
|
|
|
|
{
|
|
|
|
if (this_cpu->c_bsp_resume)
|
|
|
|
this_cpu->c_bsp_resume(&boot_cpu_data);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Syscore hooks: run bsp_resume() on the boot CPU at system resume. */
static struct syscore_ops cpu_syscore_ops = {
	.resume		= bsp_resume,
};
|
|
|
|
|
|
|
|
/*
 * Register the CPU syscore ops at core initcall time so bsp_resume()
 * is wired into the suspend/resume path early.
 */
static int __init init_cpu_syscore(void)
{
	register_syscore_ops(&cpu_syscore_ops);
	return 0;
}
core_initcall(init_cpu_syscore);
|
2018-02-16 18:26:39 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The microcode loader calls this upon late microcode load to recheck features,
|
|
|
|
* only when microcode has been updated. Caller holds microcode_mutex and CPU
|
|
|
|
* hotplug lock.
|
|
|
|
*/
|
|
|
|
void microcode_check(void)
|
|
|
|
{
|
2018-02-16 18:26:40 +07:00
|
|
|
struct cpuinfo_x86 info;
|
|
|
|
|
2018-02-16 18:26:39 +07:00
|
|
|
perf_check_microcode();
|
2018-02-16 18:26:40 +07:00
|
|
|
|
|
|
|
/* Reload CPUID max function as it might've changed. */
|
|
|
|
info.cpuid_level = cpuid_eax(0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy all capability leafs to pick up the synthetic ones so that
|
|
|
|
* memcmp() below doesn't fail on that. The ones coming from CPUID will
|
|
|
|
* get overwritten in get_cpu_cap().
|
|
|
|
*/
|
|
|
|
memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
|
|
|
|
|
|
|
|
get_cpu_cap(&info);
|
|
|
|
|
|
|
|
if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
|
|
|
|
pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
|
2018-02-16 18:26:39 +07:00
|
|
|
}
|