2008-10-23 12:26:29 +07:00
|
|
|
#ifndef _ASM_X86_PROCESSOR_H
|
|
|
|
#define _ASM_X86_PROCESSOR_H
|
2008-01-30 19:31:03 +07:00
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
#include <asm/processor-flags.h>
|
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
/* Forward declaration, a strange C thing */
|
|
|
|
struct task_struct;
|
|
|
|
struct mm_struct;
|
|
|
|
|
2008-01-30 19:31:57 +07:00
|
|
|
#include <asm/vm86.h>
|
|
|
|
#include <asm/math_emu.h>
|
|
|
|
#include <asm/segment.h>
|
|
|
|
#include <asm/types.h>
|
|
|
|
#include <asm/sigcontext.h>
|
|
|
|
#include <asm/current.h>
|
|
|
|
#include <asm/cpufeature.h>
|
|
|
|
#include <asm/page.h>
|
2009-02-12 01:20:05 +07:00
|
|
|
#include <asm/pgtable_types.h>
|
2008-01-30 19:31:33 +07:00
|
|
|
#include <asm/percpu.h>
|
2008-01-30 19:31:57 +07:00
|
|
|
#include <asm/msr.h>
|
|
|
|
#include <asm/desc_defs.h>
|
2008-01-30 19:32:38 +07:00
|
|
|
#include <asm/nops.h>
|
2012-03-29 00:11:12 +07:00
|
|
|
#include <asm/special_insns.h>
|
2015-04-22 14:57:24 +07:00
|
|
|
#include <asm/fpu/types.h>
|
2008-02-21 10:24:40 +07:00
|
|
|
|
2008-01-30 19:31:57 +07:00
|
|
|
#include <linux/personality.h>
|
2008-01-30 19:31:33 +07:00
|
|
|
#include <linux/cpumask.h>
|
|
|
|
#include <linux/cache.h>
|
2008-01-30 19:31:57 +07:00
|
|
|
#include <linux/threads.h>
|
2009-09-02 16:49:52 +07:00
|
|
|
#include <linux/math64.h>
|
2010-03-25 20:51:50 +07:00
|
|
|
#include <linux/err.h>
|
2012-03-29 00:11:12 +07:00
|
|
|
#include <linux/irqflags.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We handle most unaligned accesses in hardware. On the other hand
|
|
|
|
* unaligned DMA can be quite expensive on some Nehalem processors.
|
|
|
|
*
|
|
|
|
* Based on this we disable the IP header alignment in network drivers.
|
|
|
|
*/
|
|
|
|
#define NET_IP_ALIGN 0
|
2008-01-30 19:31:27 +07:00
|
|
|
|
2009-06-02 01:13:10 +07:00
|
|
|
#define HBP_NUM 4
|
2008-01-30 19:31:27 +07:00
|
|
|
/*
|
|
|
|
* Default implementation of macro that returns current
|
|
|
|
* instruction pointer ("program counter").
|
|
|
|
*/
|
|
|
|
static inline void *current_text_addr(void)
|
|
|
|
{
|
|
|
|
void *pc;
|
2008-02-21 10:24:40 +07:00
|
|
|
|
|
|
|
asm volatile("mov $1f, %0; 1:":"=r" (pc));
|
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
return pc;
|
|
|
|
}
|
|
|
|
|
2015-05-24 14:58:12 +07:00
|
|
|
/*
|
|
|
|
* These alignment constraints are for performance in the vSMP case,
|
|
|
|
* but in the task_struct case we must also meet hardware imposed
|
|
|
|
* alignment requirements of the FPU state:
|
|
|
|
*/
|
2008-01-30 19:31:31 +07:00
|
|
|
#ifdef CONFIG_X86_VSMP
|
2008-02-21 10:24:40 +07:00
|
|
|
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
|
|
|
|
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
|
2008-01-30 19:31:31 +07:00
|
|
|
#else
|
2015-05-24 14:58:12 +07:00
|
|
|
# define ARCH_MIN_TASKALIGN __alignof__(union fpregs_state)
|
2008-02-21 10:24:40 +07:00
|
|
|
# define ARCH_MIN_MMSTRUCT_ALIGN 0
|
2008-01-30 19:31:31 +07:00
|
|
|
#endif
|
|
|
|
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, the x86 CPU has 1 or 2 levels of TLB: the first level is split
into a data TLB and an instruction TLB, and the second level is a shared TLB
for both data and instructions.
For huge-page TLBs there is usually just one level, separated by 2MB/4MB
and 1GB page sizes.
Although each level's TLB size is important for performance tuning, for
general, coarse optimization the last-level TLB entry count is suitable; in
fact, the last-level TLB always has the biggest entry count.
This patch obtains the biggest TLB entry count and uses it in future TLB
optimization.
According to Borislav's suggestion, everything except the tlb_ll[i/d]_*
arrays (the other functions and data) will be released after system boot-up.
To be friendly to all x86 vendors, vendor-specific code was moved into their
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-28 08:02:16 +07:00
|
|
|
enum tlb_infos {
|
|
|
|
ENTRIES,
|
|
|
|
NR_INFO
|
|
|
|
};
|
|
|
|
|
|
|
|
extern u16 __read_mostly tlb_lli_4k[NR_INFO];
|
|
|
|
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
|
|
|
|
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
|
|
|
|
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
|
|
|
|
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
|
|
|
|
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
|
x86, cpu: Detect more TLB configuration
The Intel Software Developer’s Manual covers few more TLB
configurations exposed as CPUID 2 descriptors:
61H Instruction TLB: 4 KByte pages, fully associative, 48 entries
63H Data TLB: 1 GByte pages, 4-way set associative, 4 entries
76H Instruction TLB: 2M/4M pages, fully associative, 8 entries
B5H Instruction TLB: 4KByte pages, 8-way set associative, 64 entries
B6H Instruction TLB: 4KByte pages, 8-way set associative, 128 entries
C1H Shared 2nd-Level TLB: 4 KByte/2MByte pages, 8-way associative, 1024 entries
C2H Data TLB: 2 MByte/4 MByte pages, 4-way associative, 16 entries
Let's detect them as well.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/1387801018-14499-1-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
2013-12-23 19:16:58 +07:00
|
|
|
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
|
2012-06-28 08:02:19 +07:00
|
|
|
|
2008-01-30 19:31:33 +07:00
|
|
|
/*
|
|
|
|
* CPU type and hardware bug flags. Kept separately for each CPU.
|
|
|
|
* Members of this structure are referenced in head.S, so think twice
|
|
|
|
* before touching them. [mj]
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct cpuinfo_x86 {
|
2008-02-21 10:24:40 +07:00
|
|
|
__u8 x86; /* CPU family */
|
|
|
|
__u8 x86_vendor; /* CPU vendor */
|
|
|
|
__u8 x86_model;
|
|
|
|
__u8 x86_mask;
|
2008-01-30 19:31:33 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
2008-02-21 10:24:40 +07:00
|
|
|
char wp_works_ok; /* It doesn't on 386's */
|
|
|
|
|
|
|
|
/* Problems on some 486Dx4's and old 386's: */
|
|
|
|
char rfu;
|
|
|
|
char pad0;
|
2013-04-29 21:04:20 +07:00
|
|
|
char pad1;
|
2008-01-30 19:31:33 +07:00
|
|
|
#else
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
|
2009-01-24 08:18:52 +07:00
|
|
|
int x86_tlbsize;
|
2009-03-12 19:37:34 +07:00
|
|
|
#endif
|
2008-02-21 10:24:40 +07:00
|
|
|
__u8 x86_virt_bits;
|
|
|
|
__u8 x86_phys_bits;
|
|
|
|
/* CPUID returned core id bits: */
|
|
|
|
__u8 x86_coreid_bits;
|
|
|
|
/* Max extended CPUID function supported: */
|
|
|
|
__u32 extended_cpuid_level;
|
|
|
|
/* Maximum supported CPUID level, -1=no CPUID: */
|
|
|
|
int cpuid_level;
|
2013-03-20 21:07:23 +07:00
|
|
|
__u32 x86_capability[NCAPINTS + NBUGINTS];
|
2008-02-21 10:24:40 +07:00
|
|
|
char x86_vendor_id[16];
|
|
|
|
char x86_model_id[64];
|
|
|
|
/* in KB - valid for CPUS which support this call: */
|
|
|
|
int x86_cache_size;
|
|
|
|
int x86_cache_alignment; /* In bytes */
|
2015-01-24 01:45:43 +07:00
|
|
|
/* Cache QoS architectural values: */
|
|
|
|
int x86_cache_max_rmid; /* max index */
|
|
|
|
int x86_cache_occ_scale; /* scale to bytes */
|
2008-02-21 10:24:40 +07:00
|
|
|
int x86_power;
|
|
|
|
unsigned long loops_per_jiffy;
|
|
|
|
/* cpuid returned max cores value: */
|
|
|
|
u16 x86_max_cores;
|
|
|
|
u16 apicid;
|
2008-03-07 04:46:39 +07:00
|
|
|
u16 initial_apicid;
|
2008-02-21 10:24:40 +07:00
|
|
|
u16 x86_clflush_size;
|
|
|
|
/* number of cores as seen by the OS: */
|
|
|
|
u16 booted_cores;
|
|
|
|
/* Physical processor id: */
|
|
|
|
u16 phys_proc_id;
|
|
|
|
/* Core id: */
|
|
|
|
u16 cpu_core_id;
|
2010-09-30 19:38:57 +07:00
|
|
|
/* Compute unit id */
|
|
|
|
u8 compute_unit_id;
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Index into per_cpu list: */
|
|
|
|
u16 cpu_index;
|
2011-10-13 07:46:33 +07:00
|
|
|
u32 microcode;
|
2014-11-04 15:26:42 +07:00
|
|
|
};
|
2008-01-30 19:31:33 +07:00
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
#define X86_VENDOR_INTEL 0
|
|
|
|
#define X86_VENDOR_CYRIX 1
|
|
|
|
#define X86_VENDOR_AMD 2
|
|
|
|
#define X86_VENDOR_UMC 3
|
|
|
|
#define X86_VENDOR_CENTAUR 5
|
|
|
|
#define X86_VENDOR_TRANSMETA 7
|
|
|
|
#define X86_VENDOR_NSC 8
|
|
|
|
#define X86_VENDOR_NUM 9
|
|
|
|
|
|
|
|
#define X86_VENDOR_UNKNOWN 0xff
|
2008-01-30 19:31:33 +07:00
|
|
|
|
2008-01-30 19:31:39 +07:00
|
|
|
/*
|
|
|
|
* capabilities of CPUs
|
|
|
|
*/
|
2008-02-21 10:24:40 +07:00
|
|
|
extern struct cpuinfo_x86 boot_cpu_data;
|
|
|
|
extern struct cpuinfo_x86 new_cpu_data;
|
|
|
|
|
|
|
|
extern struct tss_struct doublefault_tss;
|
2009-05-10 13:47:42 +07:00
|
|
|
extern __u32 cpu_caps_cleared[NCAPINTS];
|
|
|
|
extern __u32 cpu_caps_set[NCAPINTS];
|
2008-01-30 19:31:33 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
2014-11-04 15:26:42 +07:00
|
|
|
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
|
2008-01-30 19:31:33 +07:00
|
|
|
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
|
|
|
|
#else
|
2010-12-18 22:30:05 +07:00
|
|
|
#define cpu_info boot_cpu_data
|
2008-01-30 19:31:33 +07:00
|
|
|
#define cpu_data(cpu) boot_cpu_data
|
|
|
|
#endif
|
|
|
|
|
2008-07-22 00:10:37 +07:00
|
|
|
extern const struct seq_operations cpuinfo_op;
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
|
|
|
|
|
|
|
|
extern void cpu_detect(struct cpuinfo_x86 *c);
|
2008-01-30 19:31:39 +07:00
|
|
|
|
2008-06-21 17:24:19 +07:00
|
|
|
extern void early_cpu_init(void);
|
2008-01-30 19:31:39 +07:00
|
|
|
extern void identify_boot_cpu(void);
|
|
|
|
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
|
2008-01-30 19:31:33 +07:00
|
|
|
extern void print_cpu_info(struct cpuinfo_x86 *);
|
2012-02-13 00:53:57 +07:00
|
|
|
void print_cpu_msr(struct cpuinfo_x86 *);
|
2008-01-30 19:31:33 +07:00
|
|
|
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
|
|
|
|
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
|
2012-10-19 15:59:33 +07:00
|
|
|
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
|
2008-01-30 19:31:33 +07:00
|
|
|
|
2008-08-23 22:47:10 +07:00
|
|
|
extern void detect_extended_topology(struct cpuinfo_x86 *c);
|
2008-01-30 19:31:39 +07:00
|
|
|
extern void detect_ht(struct cpuinfo_x86 *c);
|
|
|
|
|
2012-12-21 14:44:23 +07:00
|
|
|
#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
/* Every 64-bit CPU has CPUID, so no runtime detection is needed. */
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
|
2008-01-30 19:31:03 +07:00
|
|
|
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned int *ecx, unsigned int *edx)
|
2008-01-30 19:31:03 +07:00
|
|
|
{
|
|
|
|
/* ecx is often an input as well as an output. */
|
2009-12-17 07:25:42 +07:00
|
|
|
asm volatile("cpuid"
|
2008-03-23 15:03:15 +07:00
|
|
|
: "=a" (*eax),
|
|
|
|
"=b" (*ebx),
|
|
|
|
"=c" (*ecx),
|
|
|
|
"=d" (*edx)
|
2011-10-13 07:46:33 +07:00
|
|
|
: "0" (*eax), "2" (*ecx)
|
|
|
|
: "memory");
|
2008-01-30 19:31:03 +07:00
|
|
|
}
|
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
/*
 * Point CR3 at a new top-level page table.  write_cr3() takes a
 * physical address, hence the __pa() translation of the pgd pointer.
 */
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
|
2008-01-30 19:31:03 +07:00
|
|
|
|
2008-01-30 19:31:31 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
/* This is the TSS defined by the hardware. */
|
|
|
|
struct x86_hw_tss {
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned short back_link, __blh;
|
|
|
|
unsigned long sp0;
|
|
|
|
unsigned short ss0, __ss0h;
|
2015-04-03 02:41:45 +07:00
|
|
|
unsigned long sp1;
|
2015-03-11 01:06:00 +07:00
|
|
|
|
|
|
|
/*
|
2015-04-03 02:41:45 +07:00
|
|
|
* We don't use ring 1, so ss1 is a convenient scratch space in
|
|
|
|
* the same cacheline as sp0. We use ss1 to cache the value in
|
|
|
|
* MSR_IA32_SYSENTER_CS. When we context switch
|
|
|
|
* MSR_IA32_SYSENTER_CS, we first check if the new value being
|
|
|
|
* written matches ss1, and, if it's not, then we wrmsr the new
|
|
|
|
* value and update ss1.
|
2015-03-11 01:06:00 +07:00
|
|
|
*
|
2015-04-03 02:41:45 +07:00
|
|
|
* The only reason we context switch MSR_IA32_SYSENTER_CS is
|
|
|
|
* that we set it to zero in vm86 tasks to avoid corrupting the
|
|
|
|
* stack if we were to go through the sysenter path from vm86
|
|
|
|
* mode.
|
2015-03-11 01:06:00 +07:00
|
|
|
*/
|
|
|
|
unsigned short ss1; /* MSR_IA32_SYSENTER_CS */
|
|
|
|
|
|
|
|
unsigned short __ss1h;
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long sp2;
|
|
|
|
unsigned short ss2, __ss2h;
|
|
|
|
unsigned long __cr3;
|
|
|
|
unsigned long ip;
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned long ax;
|
|
|
|
unsigned long cx;
|
|
|
|
unsigned long dx;
|
|
|
|
unsigned long bx;
|
|
|
|
unsigned long sp;
|
|
|
|
unsigned long bp;
|
|
|
|
unsigned long si;
|
|
|
|
unsigned long di;
|
|
|
|
unsigned short es, __esh;
|
|
|
|
unsigned short cs, __csh;
|
|
|
|
unsigned short ss, __ssh;
|
|
|
|
unsigned short ds, __dsh;
|
|
|
|
unsigned short fs, __fsh;
|
|
|
|
unsigned short gs, __gsh;
|
|
|
|
unsigned short ldt, __ldth;
|
|
|
|
unsigned short trace;
|
|
|
|
unsigned short io_bitmap_base;
|
|
|
|
|
2008-01-30 19:31:31 +07:00
|
|
|
} __attribute__((packed));
|
|
|
|
#else
|
|
|
|
struct x86_hw_tss {
|
2008-02-21 10:24:40 +07:00
|
|
|
u32 reserved1;
|
|
|
|
u64 sp0;
|
|
|
|
u64 sp1;
|
|
|
|
u64 sp2;
|
|
|
|
u64 reserved2;
|
|
|
|
u64 ist[7];
|
|
|
|
u32 reserved3;
|
|
|
|
u32 reserved4;
|
|
|
|
u16 reserved5;
|
|
|
|
u16 io_bitmap_base;
|
|
|
|
|
2008-01-30 19:31:31 +07:00
|
|
|
} __attribute__((packed)) ____cacheline_aligned;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
2008-02-21 10:24:40 +07:00
|
|
|
* IO-bitmap sizes:
|
2008-01-30 19:31:31 +07:00
|
|
|
*/
|
2008-02-21 10:24:40 +07:00
|
|
|
#define IO_BITMAP_BITS 65536
|
|
|
|
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
|
|
|
|
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
|
|
|
|
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
|
|
|
|
#define INVALID_IO_BITMAP_OFFSET 0x8000
|
2008-01-30 19:31:31 +07:00
|
|
|
|
|
|
|
struct tss_struct {
|
2008-02-21 10:24:40 +07:00
|
|
|
/*
|
|
|
|
* The hardware state:
|
|
|
|
*/
|
|
|
|
struct x86_hw_tss x86_tss;
|
2008-01-30 19:31:31 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The extra 1 is there because the CPU will access an
|
|
|
|
* additional byte beyond the end of the IO permission
|
|
|
|
* bitmap. The extra byte must be all 1 bits, and must
|
|
|
|
* be within the limit.
|
|
|
|
*/
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
|
|
|
|
|
2008-01-30 19:31:31 +07:00
|
|
|
/*
|
2015-03-09 21:52:18 +07:00
|
|
|
* Space for the temporary SYSENTER stack:
|
2008-01-30 19:31:31 +07:00
|
|
|
*/
|
2015-03-09 21:52:18 +07:00
|
|
|
unsigned long SYSENTER_stack[64];
|
2008-02-21 10:24:40 +07:00
|
|
|
|
2008-07-04 19:56:16 +07:00
|
|
|
} ____cacheline_aligned;
|
2008-01-30 19:31:31 +07:00
|
|
|
|
2015-03-06 10:19:05 +07:00
|
|
|
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
|
2008-01-30 19:31:31 +07:00
|
|
|
|
2015-03-07 08:50:19 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
|
|
|
|
#endif
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};
|
|
|
|
|
2008-03-04 00:12:56 +07:00
|
|
|
#ifdef CONFIG_X86_64
|
2008-01-30 19:31:57 +07:00
|
|
|
DECLARE_PER_CPU(struct orig_ist, orig_ist);
|
2009-01-18 22:38:58 +07:00
|
|
|
|
2009-01-19 10:21:28 +07:00
|
|
|
union irq_stack_union {
|
|
|
|
char irq_stack[IRQ_STACK_SIZE];
|
|
|
|
/*
|
|
|
|
* GCC hardcodes the stack canary as %gs:40. Since the
|
|
|
|
* irq_stack is the object at %gs:0, we reserve the bottom
|
|
|
|
* 48 bytes of the irq stack for the canary.
|
|
|
|
*/
|
|
|
|
struct {
|
|
|
|
char gs_base[40];
|
|
|
|
unsigned long stack_canary;
|
|
|
|
};
|
|
|
|
};
|
|
|
|
|
2013-08-06 05:02:43 +07:00
|
|
|
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
|
2009-02-08 21:58:39 +07:00
|
|
|
DECLARE_INIT_PER_CPU(irq_stack_union);
|
|
|
|
|
2009-01-18 22:38:58 +07:00
|
|
|
DECLARE_PER_CPU(char *, irq_stack_ptr);
|
2009-03-14 12:49:49 +07:00
|
|
|
DECLARE_PER_CPU(unsigned int, irq_count);
|
|
|
|
extern asmlinkage void ignore_sysret(void);
|
2009-02-09 20:17:40 +07:00
|
|
|
#else /* X86_64 */
|
|
|
|
#ifdef CONFIG_CC_STACKPROTECTOR
|
2009-09-04 02:27:15 +07:00
|
|
|
/*
 * Make sure stack canary segment base is cached-aligned:
 * "For Intel Atom processors, avoid non zero segment base address
 * that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
|
2009-09-04 04:31:44 +07:00
|
|
|
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
|
2007-10-11 16:20:03 +07:00
|
|
|
#endif
|
2014-02-06 21:41:31 +07:00
|
|
|
/*
|
|
|
|
* per-CPU IRQ handling stacks
|
|
|
|
*/
|
|
|
|
struct irq_stack {
|
|
|
|
u32 stack[THREAD_SIZE/sizeof(u32)];
|
|
|
|
} __aligned(THREAD_SIZE);
|
|
|
|
|
|
|
|
DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
|
|
|
|
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
|
2009-02-09 20:17:40 +07:00
|
|
|
#endif /* X86_64 */
|
2008-01-30 19:31:03 +07:00
|
|
|
|
2008-03-11 05:28:04 +07:00
|
|
|
extern unsigned int xstate_size;
|
2008-01-30 19:31:27 +07:00
|
|
|
|
2009-09-10 00:22:48 +07:00
|
|
|
struct perf_event;
|
|
|
|
|
2008-01-30 19:31:31 +07:00
|
|
|
struct thread_struct {
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Cached TLS descriptors: */
|
|
|
|
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
|
|
|
|
unsigned long sp0;
|
|
|
|
unsigned long sp;
|
2008-01-30 19:31:31 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long sysenter_cs;
|
2008-01-30 19:31:31 +07:00
|
|
|
#else
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned short es;
|
|
|
|
unsigned short ds;
|
|
|
|
unsigned short fsindex;
|
|
|
|
unsigned short gsindex;
|
2008-01-30 19:31:31 +07:00
|
|
|
#endif
|
2009-05-04 06:30:15 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long ip;
|
2009-05-04 06:30:15 +07:00
|
|
|
#endif
|
2009-05-04 06:29:52 +07:00
|
|
|
#ifdef CONFIG_X86_64
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long fs;
|
2009-05-04 06:29:52 +07:00
|
|
|
#endif
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long gs;
|
2015-04-23 17:49:20 +07:00
|
|
|
|
2009-09-10 00:22:48 +07:00
|
|
|
/* Save middle states of ptrace breakpoints */
|
|
|
|
struct perf_event *ptrace_bps[HBP_NUM];
|
|
|
|
/* Debug status used for traps, single steps, etc... */
|
|
|
|
unsigned long debugreg6;
|
2010-02-19 00:24:18 +07:00
|
|
|
/* Keep track of the exact dr7 value set by the user */
|
|
|
|
unsigned long ptrace_dr7;
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Fault info: */
|
|
|
|
unsigned long cr2;
|
2012-03-12 16:25:55 +07:00
|
|
|
unsigned long trap_nr;
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long error_code;
|
2008-01-30 19:31:31 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Virtual 86 mode info */
|
2008-01-30 19:31:31 +07:00
|
|
|
struct vm86_struct __user *vm86_info;
|
|
|
|
unsigned long screen_bitmap;
|
2008-02-21 10:24:40 +07:00
|
|
|
unsigned long v86flags;
|
|
|
|
unsigned long v86mask;
|
|
|
|
unsigned long saved_sp0;
|
|
|
|
unsigned int saved_fs;
|
|
|
|
unsigned int saved_gs;
|
2008-01-30 19:31:31 +07:00
|
|
|
#endif
|
2008-02-21 10:24:40 +07:00
|
|
|
/* IO permissions: */
|
|
|
|
unsigned long *io_bitmap_ptr;
|
|
|
|
unsigned long iopl;
|
|
|
|
/* Max allowed port in the bitmap, in bytes: */
|
|
|
|
unsigned io_bitmap_max;
|
2015-07-17 17:28:11 +07:00
|
|
|
|
|
|
|
/* Floating point and extended processor state */
|
|
|
|
struct fpu fpu;
|
|
|
|
/*
|
|
|
|
* WARNING: 'fpu' is dynamically-sized. It *MUST* be at
|
|
|
|
* the end.
|
|
|
|
*/
|
2008-01-30 19:31:31 +07:00
|
|
|
};
|
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
/*
 * Set IOPL bits in EFLAGS from given mask.
 * Compiles to a no-op on 64-bit kernels.
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	/* Read EFLAGS, clear the IOPL field, OR in the mask, write back. */
	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
static inline void
|
|
|
|
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
|
2008-01-30 19:31:31 +07:00
|
|
|
{
|
|
|
|
tss->x86_tss.sp0 = thread->sp0;
|
|
|
|
#ifdef CONFIG_X86_32
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Only happens when SEP is enabled, no need to test "SEP"arately: */
|
2008-01-30 19:31:31 +07:00
|
|
|
if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
|
|
|
|
tss->x86_tss.ss1 = thread->sysenter_cs;
|
|
|
|
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
2008-01-30 19:31:27 +07:00
|
|
|
|
2008-01-30 19:32:08 +07:00
|
|
|
/*
 * Issue the SWAPGS instruction (64-bit only; a no-op elsewhere).
 */
static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
|
|
|
|
|
2015-03-07 08:50:19 +07:00
|
|
|
static inline unsigned long current_top_of_stack(void)
|
2015-03-06 10:19:02 +07:00
|
|
|
{
|
2015-03-07 08:50:19 +07:00
|
|
|
#ifdef CONFIG_X86_64
|
2015-03-06 10:19:05 +07:00
|
|
|
return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
|
2015-03-07 08:50:19 +07:00
|
|
|
#else
|
|
|
|
/* sp0 on x86_32 is special in and around vm86 mode. */
|
|
|
|
return this_cpu_read_stable(cpu_current_top_of_stack);
|
|
|
|
#endif
|
2015-03-06 10:19:02 +07:00
|
|
|
}
|
|
|
|
|
2008-01-30 19:31:31 +07:00
|
|
|
#ifdef CONFIG_PARAVIRT
|
|
|
|
#include <asm/paravirt.h>
|
|
|
|
#else
|
2008-02-21 10:24:40 +07:00
|
|
|
#define __cpuid native_cpuid
|
|
|
|
#define paravirt_enabled() 0
|
2008-01-30 19:31:27 +07:00
|
|
|
|
2008-03-23 15:03:15 +07:00
|
|
|
/* Non-paravirt fallback: update the TSS stack pointer directly. */
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}
|
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
#define set_iopl_mask native_set_iopl_mask
|
2008-01-30 19:31:27 +07:00
|
|
|
#endif /* CONFIG_PARAVIRT */
|
|
|
|
|
2008-01-30 19:31:38 +07:00
|
|
|
/* Wrapper type holding a per-thread segment value. */
typedef struct {
	unsigned long		seg;
} mm_segment_t;
|
|
|
|
|
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
/* Free all resources held by a thread. */
|
|
|
|
extern void release_thread(struct task_struct *);
|
|
|
|
|
|
|
|
unsigned long get_wchan(struct task_struct *p);
|
2008-01-30 19:31:03 +07:00
|
|
|
|
|
|
|
/*
 * Generic CPUID function.
 * Clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
|
|
|
|
|
|
|
|
/* Some CPUID calls want 'count' (the sub-leaf) to be placed in ecx. */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
|
|
|
|
|
|
|
|
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[0];	/* eax */
}
|
2008-02-21 10:24:40 +07:00
|
|
|
|
2008-01-30 19:31:03 +07:00
|
|
|
/* Return only the EBX result of CPUID leaf 'op'. */
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[1];	/* ebx */
}
|
2008-02-21 10:24:40 +07:00
|
|
|
|
2008-01-30 19:31:03 +07:00
|
|
|
/* Return only the ECX result of CPUID leaf 'op'. */
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[2];	/* ecx */
}
|
2008-02-21 10:24:40 +07:00
|
|
|
|
2008-01-30 19:31:03 +07:00
|
|
|
/* Return only the EDX result of CPUID leaf 'op'. */
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[3];	/* edx */
}
|
|
|
|
|
2008-01-30 19:31:27 +07:00
|
|
|
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
|
|
|
|
static inline void rep_nop(void)
|
|
|
|
{
|
2008-03-23 15:03:15 +07:00
|
|
|
asm volatile("rep; nop" ::: "memory");
|
2008-01-30 19:31:27 +07:00
|
|
|
}
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Architecture hook invoked from generic busy-wait loops. */
static inline void cpu_relax(void)
{
	rep_nop();
}
|
|
|
|
|
arch, locking: Ciao arch_mutex_cpu_relax()
The arch_mutex_cpu_relax() function, introduced by 34b133f, is
hacky and ugly. It was added a few years ago to address the fact
that common cpu_relax() calls include yielding on s390, and thus
impact the optimistic spinning functionality of mutexes. Nowadays
we use this function well beyond mutexes: rwsem, qrwlock, mcs and
lockref. Since the macro that defines the call is in the mutex header,
any users must include mutex.h and the naming is misleading as well.
This patch (i) renames the call to cpu_relax_lowlatency ("relax, but
only if you can do it with very low latency") and (ii) defines it in
each arch's asm/processor.h local header, just like for regular cpu_relax
functions. On all archs, except s390, cpu_relax_lowlatency is simply cpu_relax,
and thus we can take it out of mutex.h. While this can seem redundant,
I believe it is a good choice as it allows us to move out arch specific
logic from generic locking primitives and enables future(?) archs to
transparently define it, similarly to System Z.
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Bharat Bhushan <r65777@freescale.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Liqin <liqin.linux@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Howells <dhowells@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Cc: Dominik Dingel <dingel@linux.vnet.ibm.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Joe Perches <joe@perches.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Joseph Myers <joseph@codesourcery.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Lennox Wu <lennox.wu@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Nicolas Pitre <nico@linaro.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Qais Yousef <qais.yousef@imgtec.com>
Cc: Qiaowei Ren <qiaowei.ren@intel.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Stratos Karafotis <stratosk@semaphore.gr>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vasily Kulikov <segoon@openwall.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Wolfram Sang <wsa@the-dreams.de>
Cc: adi-buildroot-devel@lists.sourceforge.net
Cc: linux390@de.ibm.com
Cc: linux-alpha@vger.kernel.org
Cc: linux-am33-list@redhat.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-c6x-dev@linux-c6x.org
Cc: linux-cris-kernel@axis.com
Cc: linux-hexagon@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux@lists.openrisc.net
Cc: linux-m32r-ja@ml.linux-m32r.org
Cc: linux-m32r@ml.linux-m32r.org
Cc: linux-m68k@lists.linux-m68k.org
Cc: linux-metag@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: linux-parisc@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1404079773.2619.4.camel@buesod1.americas.hpqcorp.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-06-30 05:09:33 +07:00
|
|
|
#define cpu_relax_lowlatency() cpu_relax()
|
|
|
|
|
2009-09-10 08:53:50 +07:00
|
|
|
/* Stop speculative execution and prefetching of modified code. */
|
2008-01-30 19:31:27 +07:00
|
|
|
static inline void sync_core(void)
|
|
|
|
{
|
|
|
|
int tmp;
|
2008-02-21 10:24:40 +07:00
|
|
|
|
2012-11-29 02:50:23 +07:00
|
|
|
#ifdef CONFIG_M486
|
2012-11-29 02:50:30 +07:00
|
|
|
/*
|
|
|
|
* Do a CPUID if available, otherwise do a jump. The jump
|
|
|
|
* can conveniently enough be the jump around CPUID.
|
|
|
|
*/
|
|
|
|
asm volatile("cmpl %2,%1\n\t"
|
|
|
|
"jl 1f\n\t"
|
|
|
|
"cpuid\n"
|
|
|
|
"1:"
|
|
|
|
: "=a" (tmp)
|
|
|
|
: "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
|
|
|
|
: "ebx", "ecx", "edx", "memory");
|
|
|
|
#else
|
|
|
|
/*
|
|
|
|
* CPUID is a barrier to speculative execution.
|
|
|
|
* Prefetched instructions are automatically
|
|
|
|
* invalidated when modified.
|
|
|
|
*/
|
|
|
|
asm volatile("cpuid"
|
|
|
|
: "=a" (tmp)
|
|
|
|
: "0" (1)
|
|
|
|
: "ebx", "ecx", "edx", "memory");
|
2009-09-10 08:53:50 +07:00
|
|
|
#endif
|
2008-01-30 19:31:27 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
extern void select_idle_routine(const struct cpuinfo_x86 *c);
|
2011-04-02 03:59:53 +07:00
|
|
|
extern void init_amd_e400_c1e_mask(void);
|
2008-01-30 19:31:27 +07:00
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
extern unsigned long boot_option_idle_override;
|
2011-04-02 03:59:53 +07:00
|
|
|
extern bool amd_e400_c1e_detected;
|
2008-01-30 19:31:27 +07:00
|
|
|
|
2010-11-03 23:06:14 +07:00
|
|
|
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
|
2013-02-10 13:38:39 +07:00
|
|
|
IDLE_POLL};
|
2010-11-03 23:06:14 +07:00
|
|
|
|
2008-01-30 19:31:39 +07:00
|
|
|
extern void enable_sep_cpu(void);
|
|
|
|
extern int sysenter_setup(void);
|
|
|
|
|
2010-05-21 09:04:29 +07:00
|
|
|
extern void early_trap_init(void);
|
x86, 64bit: Use a #PF handler to materialize early mappings on demand
Linear mode (CR0.PG = 0) is mutually exclusive with 64-bit mode; all
64-bit code has to use page tables. This makes it awkward before we
have first set up properly all-covering page tables to access objects
that are outside the static kernel range.
So far we have dealt with that simply by mapping a fixed amount of
low memory, but that fails in at least two upcoming use cases:
1. We will support load and run kernel, struct boot_params, ramdisk,
command line, etc. above the 4 GiB mark.
2. We need to access the ramdisk early to get microcode to update that as
early as possible.
We could use early_iomap to access them too, but it would make the code
too messy and hard to unify with 32-bit.
Hence, set up a #PF table and use a fixed number of buffers to set up
page tables on demand. If the buffers fill up then we simply flush
them and start over. These buffers are all in __initdata, so it does
not increase RAM usage at runtime.
Thus, with the help of the #PF handler, we can set the final kernel
mapping from blank, and switch to init_level4_pgt later.
During the switchover in head_64.S, before #PF handler is available,
we use three pages to handle kernel crossing 1G, 512G boundaries with
sharing page by playing games with page aliasing: the same page is
mapped twice in the higher-level tables with appropriate wraparound.
The kernel region itself will be properly mapped; other mappings may
be spurious.
early_make_pgtable is using kernel high mapping address to access pages
to set page table.
-v4: Add phys_base offset to make kexec happy, and add
init_mapping_kernel() - Yinghai
-v5: fix compiling with xen, and add back ident level3 and level2 for xen
also move back init_level4_pgt from BSS to DATA again.
because we have to clear it anyway. - Yinghai
-v6: switch to init_level4_pgt in init_mem_mapping. - Yinghai
-v7: remove not needed clear_page for init_level4_page
it is with fill 512,8,0 already in head_64.S - Yinghai
-v8: we need to keep that handler alive until init_mem_mapping and don't
let early_trap_init to trash that early #PF handler.
So split early_trap_pf_init out and move it down. - Yinghai
-v9: switchover only cover kernel space instead of 1G so could avoid
touch possible mem holes. - Yinghai
-v11: change far jmp back to far return to initial_code, that is needed
to fix failure that is reported by Konrad on AMD systems. - Yinghai
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-12-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
2013-01-25 03:19:52 +07:00
|
|
|
void early_trap_pf_init(void);
|
2010-05-21 09:04:29 +07:00
|
|
|
|
2008-01-30 19:31:39 +07:00
|
|
|
/* Defined in head.S */
|
2008-02-21 10:24:40 +07:00
|
|
|
extern struct desc_ptr early_gdt_descr;
|
2008-01-30 19:31:39 +07:00
|
|
|
|
|
|
|
extern void cpu_set_gdt(int);
|
2009-01-30 15:47:53 +07:00
|
|
|
extern void switch_to_new_gdt(int);
|
2009-01-30 15:47:54 +07:00
|
|
|
extern void load_percpu_segment(int);
|
2008-01-30 19:31:39 +07:00
|
|
|
extern void cpu_init(void);
|
|
|
|
|
2008-12-11 19:49:59 +07:00
|
|
|
static inline unsigned long get_debugctlmsr(void)
|
|
|
|
{
|
2010-03-25 20:51:51 +07:00
|
|
|
unsigned long debugctlmsr = 0;
|
2008-12-11 19:49:59 +07:00
|
|
|
|
|
|
|
#ifndef CONFIG_X86_DEBUGCTLMSR
|
|
|
|
if (boot_cpu_data.x86 < 6)
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
|
|
|
|
|
2010-03-25 20:51:51 +07:00
|
|
|
return debugctlmsr;
|
2008-12-11 19:49:59 +07:00
|
|
|
}
|
|
|
|
|
2008-03-10 20:11:17 +07:00
|
|
|
/*
 * Write @debugctlmsr to the IA32_DEBUGCTL MSR.
 *
 * Mirrors the guard in get_debugctlmsr(): on kernels built without
 * X86_DEBUGCTLMSR support, bail out on family < 6 CPUs (the MSR is
 * presumed absent there) instead of faulting on the WRMSR.
 */
static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
|
|
|
|
|
2012-09-03 20:24:17 +07:00
|
|
|
extern void set_task_blockstep(struct task_struct *task, bool on);
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/*
|
|
|
|
* from system description table in BIOS. Mostly for MCA use, but
|
|
|
|
* others may find it useful:
|
|
|
|
*/
|
|
|
|
extern unsigned int machine_id;
|
|
|
|
extern unsigned int machine_submodel_id;
|
|
|
|
extern unsigned int BIOS_revision;
|
2008-01-30 19:31:39 +07:00
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/* Boot loader type from the setup header: */
|
|
|
|
extern int bootloader_type;
|
2009-05-08 06:54:11 +07:00
|
|
|
extern int bootloader_version;
|
2008-01-30 19:31:39 +07:00
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
extern char ignore_fpu_irq;
|
2008-01-30 19:31:27 +07:00
|
|
|
|
|
|
|
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
|
|
|
|
#define ARCH_HAS_PREFETCHW
|
|
|
|
#define ARCH_HAS_SPINLOCK_PREFETCH
|
|
|
|
|
2008-01-30 19:31:40 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
x86/asm: Cleanup prefetch primitives
This is based on a patch originally by hpa.
With the current improvements to the alternatives, we can simply use %P1
as a mem8 operand constraint and rely on the toolchain to generate the
proper instruction sizes. For example, on 32-bit, where we use an empty
old instruction we get:
apply_alternatives: feat: 6*32+8, old: (c104648b, len: 4), repl: (c195566c, len: 4)
c104648b: alt_insn: 90 90 90 90
c195566c: rpl_insn: 0f 0d 4b 5c
...
apply_alternatives: feat: 6*32+8, old: (c18e09b4, len: 3), repl: (c1955948, len: 3)
c18e09b4: alt_insn: 90 90 90
c1955948: rpl_insn: 0f 0d 08
...
apply_alternatives: feat: 6*32+8, old: (c1190cf9, len: 7), repl: (c1955a79, len: 7)
c1190cf9: alt_insn: 90 90 90 90 90 90 90
c1955a79: rpl_insn: 0f 0d 0d a0 d4 85 c1
all with the proper padding done depending on the size of the
replacement instruction the compiler generates.
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: H. Peter Anvin <hpa@linux.intel.com>
2015-01-18 23:48:18 +07:00
|
|
|
# define BASE_PREFETCH ""
|
2008-02-21 10:24:40 +07:00
|
|
|
# define ARCH_HAS_PREFETCH
|
2008-01-30 19:31:40 +07:00
|
|
|
#else
|
x86/asm: Cleanup prefetch primitives
This is based on a patch originally by hpa.
With the current improvements to the alternatives, we can simply use %P1
as a mem8 operand constraint and rely on the toolchain to generate the
proper instruction sizes. For example, on 32-bit, where we use an empty
old instruction we get:
apply_alternatives: feat: 6*32+8, old: (c104648b, len: 4), repl: (c195566c, len: 4)
c104648b: alt_insn: 90 90 90 90
c195566c: rpl_insn: 0f 0d 4b 5c
...
apply_alternatives: feat: 6*32+8, old: (c18e09b4, len: 3), repl: (c1955948, len: 3)
c18e09b4: alt_insn: 90 90 90
c1955948: rpl_insn: 0f 0d 08
...
apply_alternatives: feat: 6*32+8, old: (c1190cf9, len: 7), repl: (c1955a79, len: 7)
c1190cf9: alt_insn: 90 90 90 90 90 90 90
c1955a79: rpl_insn: 0f 0d 0d a0 d4 85 c1
all with the proper padding done depending on the size of the
replacement instruction the compiler generates.
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: H. Peter Anvin <hpa@linux.intel.com>
2015-01-18 23:48:18 +07:00
|
|
|
# define BASE_PREFETCH "prefetcht0 %P1"
|
2008-01-30 19:31:40 +07:00
|
|
|
#endif
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/*
|
|
|
|
* Prefetch instructions for Pentium III (+) and AMD Athlon (+)
|
|
|
|
*
|
|
|
|
* It's not worth to care about 3dnow prefetches for the K6
|
|
|
|
* because they are microcoded there and very slow.
|
|
|
|
*/
|
2008-01-30 19:31:40 +07:00
|
|
|
/*
 * prefetch() - hint the CPU to pull the cache line at @x into the cache.
 *
 * Patched at boot by the alternatives machinery: CPUs with SSE
 * (X86_FEATURE_XMM) get "prefetchnta"; everything else gets
 * BASE_PREFETCH ("prefetcht0" on 64-bit, nothing on 32-bit).
 * %P1 lets the toolchain emit the proper mem8 operand encoding.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/*
|
|
|
|
* 3dnow prefetch to get an exclusive cache line.
|
|
|
|
* Useful for spinlocks to avoid one state transition in the
|
|
|
|
* cache coherency protocol:
|
|
|
|
*/
|
2008-01-30 19:31:40 +07:00
|
|
|
/*
 * prefetchw() - prefetch the cache line at @x with intent to write.
 *
 * Uses the 3DNow!/PRFCHW "prefetchw" instruction when the CPU
 * advertises X86_FEATURE_3DNOWPREFETCH, obtaining the line in a
 * writable state up front; otherwise the alternatives machinery
 * falls back to BASE_PREFETCH.
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/*
 * Prefetch-for-write the cache line holding a spinlock, so the
 * subsequent lock acquisition avoids one shared->exclusive transition
 * in the cache-coherency protocol.
 */
static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
|
|
|
|
|
2015-03-11 01:05:59 +07:00
|
|
|
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
|
|
|
|
TOP_OF_KERNEL_STACK_PADDING)
|
|
|
|
|
2008-01-30 19:31:57 +07:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
/*
|
|
|
|
* User space process size: 3GB (default).
|
|
|
|
*/
|
2008-02-21 10:24:40 +07:00
|
|
|
#define TASK_SIZE PAGE_OFFSET
|
2009-02-21 05:32:28 +07:00
|
|
|
#define TASK_SIZE_MAX TASK_SIZE
|
2008-02-21 10:24:40 +07:00
|
|
|
#define STACK_TOP TASK_SIZE
|
|
|
|
#define STACK_TOP_MAX STACK_TOP
|
|
|
|
|
|
|
|
#define INIT_THREAD { \
|
2015-03-11 01:05:59 +07:00
|
|
|
.sp0 = TOP_OF_INIT_STACK, \
|
2008-02-21 10:24:40 +07:00
|
|
|
.vm86_info = NULL, \
|
|
|
|
.sysenter_cs = __KERNEL_CS, \
|
|
|
|
.io_bitmap_ptr = NULL, \
|
2008-01-30 19:31:57 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
|
|
|
|
|
|
|
/*
|
2015-03-13 21:09:03 +07:00
|
|
|
* TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
|
2008-01-30 19:31:57 +07:00
|
|
|
* This is necessary to guarantee that the entire "struct pt_regs"
|
tree-wide: fix comment/printk typos
"gadget", "through", "command", "maintain", "maintain", "controller", "address",
"between", "initiali[zs]e", "instead", "function", "select", "already",
"equal", "access", "management", "hierarchy", "registration", "interest",
"relative", "memory", "offset", "already",
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
2010-11-02 02:38:34 +07:00
|
|
|
* is accessible even if the CPU haven't stored the SS/ESP registers
|
2008-01-30 19:31:57 +07:00
|
|
|
* on the stack (interrupt gate does not save these registers
|
|
|
|
* when switching to the same priv ring).
|
|
|
|
* Therefore beware: accessing the ss/esp fields of the
|
|
|
|
* "struct pt_regs" is possible, but they may contain the
|
|
|
|
* completely wrong values.
|
|
|
|
*/
|
2015-03-13 21:09:03 +07:00
|
|
|
#define task_pt_regs(task) \
|
|
|
|
({ \
|
|
|
|
unsigned long __ptr = (unsigned long)task_stack_page(task); \
|
|
|
|
__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
|
|
|
|
((struct pt_regs *)__ptr) - 1; \
|
2008-01-30 19:31:57 +07:00
|
|
|
})
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
|
2008-01-30 19:31:57 +07:00
|
|
|
|
|
|
|
#else
|
|
|
|
/*
|
2014-11-05 06:46:21 +07:00
|
|
|
* User space process size. 47bits minus one guard page. The guard
|
|
|
|
* page is necessary on Intel CPUs: if a SYSCALL instruction is at
|
|
|
|
* the highest possible canonical userspace address, then that
|
|
|
|
* syscall will enter the kernel with a non-canonical return
|
|
|
|
* address, and SYSRET will explode dangerously. We avoid this
|
|
|
|
* particular problem by preventing anything from being mapped
|
|
|
|
* at the maximum canonical address.
|
2008-01-30 19:31:57 +07:00
|
|
|
*/
|
2009-02-21 05:32:28 +07:00
|
|
|
#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
|
2008-01-30 19:31:57 +07:00
|
|
|
|
|
|
|
/* This decides where the kernel will search for a free chunk of vm
|
|
|
|
* space during mmap's.
|
|
|
|
*/
|
2008-02-21 10:24:40 +07:00
|
|
|
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
|
|
|
|
0xc0000000 : 0xFFFFe000)
|
2008-01-30 19:31:57 +07:00
|
|
|
|
2012-02-07 04:03:09 +07:00
|
|
|
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
|
2009-02-21 05:32:28 +07:00
|
|
|
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
|
2012-02-07 04:03:09 +07:00
|
|
|
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
|
2009-02-21 05:32:28 +07:00
|
|
|
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
|
2008-01-30 19:31:57 +07:00
|
|
|
|
2008-02-08 19:19:26 +07:00
|
|
|
#define STACK_TOP TASK_SIZE
|
2009-02-21 05:32:28 +07:00
|
|
|
#define STACK_TOP_MAX TASK_SIZE_MAX
|
2008-02-08 19:19:26 +07:00
|
|
|
|
2008-01-30 19:31:57 +07:00
|
|
|
#define INIT_THREAD { \
|
2015-03-11 01:05:59 +07:00
|
|
|
.sp0 = TOP_OF_INIT_STACK \
|
2008-01-30 19:31:57 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return saved PC of a blocked thread.
|
|
|
|
* What is this good for? it will be always the scheduler or ret_from_fork.
|
|
|
|
*/
|
2008-02-21 10:24:40 +07:00
|
|
|
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
|
2008-01-30 19:31:57 +07:00
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
|
2009-11-03 16:22:40 +07:00
|
|
|
extern unsigned long KSTK_ESP(struct task_struct *task);
|
2012-02-15 04:49:48 +07:00
|
|
|
|
2008-01-30 19:31:57 +07:00
|
|
|
#endif /* CONFIG_X86_64 */
|
|
|
|
|
2008-02-21 11:18:40 +07:00
|
|
|
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
|
|
|
|
unsigned long new_sp);
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
/*
|
|
|
|
* This decides where the kernel will search for a free chunk of vm
|
2008-01-30 19:31:27 +07:00
|
|
|
* space during mmap's.
|
|
|
|
*/
|
|
|
|
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
|
|
|
|
|
2008-02-21 10:24:40 +07:00
|
|
|
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
|
2008-01-30 19:31:27 +07:00
|
|
|
|
2008-04-14 05:24:18 +07:00
|
|
|
/* Get/set a process' ability to use the timestamp counter instruction */
|
|
|
|
#define GET_TSC_CTL(adr) get_tsc_mode((adr))
|
|
|
|
#define SET_TSC_CTL(val) set_tsc_mode((val))
|
|
|
|
|
|
|
|
extern int get_tsc_mode(unsigned long adr);
|
|
|
|
extern int set_tsc_mode(unsigned int val);
|
|
|
|
|
x86, mpx: On-demand kernel allocation of bounds tables
This is really the meat of the MPX patch set. If there is one patch to
review in the entire series, this is the one. There is a new ABI here
and this kernel code also interacts with userspace memory in a
relatively unusual manner. (small FAQ below).
Long Description:
This patch adds two prctl() commands to provide enable or disable the
management of bounds tables in kernel, including on-demand kernel
allocation (See the patch "on-demand kernel allocation of bounds tables")
and cleanup (See the patch "cleanup unused bound tables"). Applications
do not strictly need the kernel to manage bounds tables and we expect
some applications to use MPX without taking advantage of this kernel
support. This means the kernel can not simply infer whether an application
needs bounds table management from the MPX registers. The prctl() is an
explicit signal from userspace.
PR_MPX_ENABLE_MANAGEMENT is meant to be a signal from userspace to
require kernel's help in managing bounds tables.
PR_MPX_DISABLE_MANAGEMENT is the opposite, meaning that userspace don't
want kernel's help any more. With PR_MPX_DISABLE_MANAGEMENT, the kernel
won't allocate and free bounds tables even if the CPU supports MPX.
PR_MPX_ENABLE_MANAGEMENT will fetch the base address of the bounds
directory out of a userspace register (bndcfgu) and then cache it into
a new field (->bd_addr) in the 'mm_struct'. PR_MPX_DISABLE_MANAGEMENT
will set "bd_addr" to an invalid address. Using this scheme, we can
use "bd_addr" to determine whether the management of bounds tables in
kernel is enabled.
Also, the only way to access that bndcfgu register is via an xsaves,
which can be expensive. Caching "bd_addr" like this also helps reduce
the cost of those xsaves when doing table cleanup at munmap() time.
Unfortunately, we can not apply this optimization to #BR fault time
because we need an xsave to get the value of BNDSTATUS.
==== Why does the hardware even have these Bounds Tables? ====
MPX only has 4 hardware registers for storing bounds information.
If MPX-enabled code needs more than these 4 registers, it needs to
spill them somewhere. It has two special instructions for this
which allow the bounds to be moved between the bounds registers
and some new "bounds tables".
They are similar conceptually to a page fault and will be raised by
the MPX hardware during both bounds violations or when the tables
are not present. This patch handles those #BR exceptions for
not-present tables by carving the space out of the normal processes
address space (essentially calling the new mmap() interface introduced
earlier in this patch set.) and then pointing the bounds-directory
over to it.
The tables *need* to be accessed and controlled by userspace because
the instructions for moving bounds in and out of them are extremely
frequent. They potentially happen every time a register pointing to
memory is dereferenced. Any direct kernel involvement (like a syscall)
to access the tables would obviously destroy performance.
==== Why not do this in userspace? ====
This patch is obviously doing this allocation in the kernel.
However, MPX does not strictly *require* anything in the kernel.
It can theoretically be done completely from userspace. Here are
a few ways this *could* be done. I don't think any of them are
practical in the real-world, but here they are.
Q: Can virtual space simply be reserved for the bounds tables so
that we never have to allocate them?
A: As noted earlier, these tables are *HUGE*. An X-GB virtual
area needs 4*X GB of virtual space, plus 2GB for the bounds
directory. If we were to preallocate them for the 128TB of
user virtual address space, we would need to reserve 512TB+2GB,
which is larger than the entire virtual address space today.
This means they can not be reserved ahead of time. Also, a
single process's pre-populated bounds directory consumes 2GB
of virtual *AND* physical memory. IOW, it's completely
infeasible to prepopulate bounds directories.
Q: Can we preallocate bounds table space at the same time memory
is allocated which might contain pointers that might eventually
need bounds tables?
A: This would work if we could hook the site of each and every
memory allocation syscall. This can be done for small,
constrained applications. But, it isn't practical at a larger
scale since a given app has no way of controlling how all the
parts of the app might allocate memory (think libraries). The
kernel is really the only place to intercept these calls.
Q: Could a bounds fault be handed to userspace and the tables
allocated there in a signal handler instead of in the kernel?
A: (thanks to tglx) mmap() is not on the list of safe async
handler functions and even if mmap() would work it still
requires locking or nasty tricks to keep track of the
allocation state there.
Having ruled out all of the userspace-only approaches for managing
bounds tables that we could think of, we create them on demand in
the kernel.
Based-on-patch-by: Qiaowei Ren <qiaowei.ren@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: linux-mm@kvack.org
Cc: linux-mips@linux-mips.org
Cc: Dave Hansen <dave@sr71.net>
Link: http://lkml.kernel.org/r/20141114151829.AD4310DE@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2014-11-14 22:18:29 +07:00
|
|
|
/* Register/unregister a process' MPX related resource */
|
2015-06-08 01:37:02 +07:00
|
|
|
#define MPX_ENABLE_MANAGEMENT() mpx_enable_management()
|
|
|
|
#define MPX_DISABLE_MANAGEMENT() mpx_disable_management()
|
x86, mpx: On-demand kernel allocation of bounds tables
This is really the meat of the MPX patch set. If there is one patch to
review in the entire series, this is the one. There is a new ABI here
and this kernel code also interacts with userspace memory in a
relatively unusual manner. (small FAQ below).
Long Description:
This patch adds two prctl() commands to provide enable or disable the
management of bounds tables in kernel, including on-demand kernel
allocation (See the patch "on-demand kernel allocation of bounds tables")
and cleanup (See the patch "cleanup unused bound tables"). Applications
do not strictly need the kernel to manage bounds tables and we expect
some applications to use MPX without taking advantage of this kernel
support. This means the kernel can not simply infer whether an application
needs bounds table management from the MPX registers. The prctl() is an
explicit signal from userspace.
PR_MPX_ENABLE_MANAGEMENT is meant to be a signal from userspace to
require kernel's help in managing bounds tables.
PR_MPX_DISABLE_MANAGEMENT is the opposite, meaning that userspace don't
want kernel's help any more. With PR_MPX_DISABLE_MANAGEMENT, the kernel
won't allocate and free bounds tables even if the CPU supports MPX.
PR_MPX_ENABLE_MANAGEMENT will fetch the base address of the bounds
directory out of a userspace register (bndcfgu) and then cache it into
a new field (->bd_addr) in the 'mm_struct'. PR_MPX_DISABLE_MANAGEMENT
will set "bd_addr" to an invalid address. Using this scheme, we can
use "bd_addr" to determine whether the management of bounds tables in
kernel is enabled.
Also, the only way to access that bndcfgu register is via an xsaves,
which can be expensive. Caching "bd_addr" like this also helps reduce
the cost of those xsaves when doing table cleanup at munmap() time.
Unfortunately, we can not apply this optimization to #BR fault time
because we need an xsave to get the value of BNDSTATUS.
==== Why does the hardware even have these Bounds Tables? ====
MPX only has 4 hardware registers for storing bounds information.
If MPX-enabled code needs more than these 4 registers, it needs to
spill them somewhere. It has two special instructions for this
which allow the bounds to be moved between the bounds registers
and some new "bounds tables".
They are similar conceptually to a page fault and will be raised by
the MPX hardware during both bounds violations or when the tables
are not present. This patch handles those #BR exceptions for
not-present tables by carving the space out of the normal processes
address space (essentially calling the new mmap() interface introduced
earlier in this patch set.) and then pointing the bounds-directory
over to it.
The tables *need* to be accessed and controlled by userspace because
the instructions for moving bounds in and out of them are extremely
frequent. They potentially happen every time a register pointing to
memory is dereferenced. Any direct kernel involvement (like a syscall)
to access the tables would obviously destroy performance.
==== Why not do this in userspace? ====
This patch is obviously doing this allocation in the kernel.
However, MPX does not strictly *require* anything in the kernel.
It can theoretically be done completely from userspace. Here are
a few ways this *could* be done. I don't think any of them are
practical in the real-world, but here they are.
Q: Can virtual space simply be reserved for the bounds tables so
that we never have to allocate them?
A: As noted earlier, these tables are *HUGE*. An X-GB virtual
area needs 4*X GB of virtual space, plus 2GB for the bounds
directory. If we were to preallocate them for the 128TB of
user virtual address space, we would need to reserve 512TB+2GB,
which is larger than the entire virtual address space today.
This means they can not be reserved ahead of time. Also, a
single process's pre-populated bounds directory consumes 2GB
of virtual *AND* physical memory. IOW, it's completely
infeasible to prepopulate bounds directories.
Q: Can we preallocate bounds table space at the same time memory
is allocated which might contain pointers that might eventually
need bounds tables?
A: This would work if we could hook the site of each and every
memory allocation syscall. This can be done for small,
constrained applications. But, it isn't practical at a larger
scale since a given app has no way of controlling how all the
parts of the app might allocate memory (think libraries). The
kernel is really the only place to intercept these calls.
Q: Could a bounds fault be handed to userspace and the tables
allocated there in a signal handler instead of in the kernel?
A: (thanks to tglx) mmap() is not on the list of safe async
handler functions and even if mmap() would work it still
requires locking or nasty tricks to keep track of the
allocation state there.
Having ruled out all of the userspace-only approaches for managing
bounds tables that we could think of, we create them on demand in
the kernel.
Based-on-patch-by: Qiaowei Ren <qiaowei.ren@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: linux-mm@kvack.org
Cc: linux-mips@linux-mips.org
Cc: Dave Hansen <dave@sr71.net>
Link: http://lkml.kernel.org/r/20141114151829.AD4310DE@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2014-11-14 22:18:29 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_X86_INTEL_MPX
|
2015-06-08 01:37:02 +07:00
|
|
|
extern int mpx_enable_management(void);
|
|
|
|
extern int mpx_disable_management(void);
|
x86, mpx: On-demand kernel allocation of bounds tables
This is really the meat of the MPX patch set. If there is one patch to
review in the entire series, this is the one. There is a new ABI here
and this kernel code also interacts with userspace memory in a
relatively unusual manner. (small FAQ below).
Long Description:
This patch adds two prctl() commands to provide enable or disable the
management of bounds tables in kernel, including on-demand kernel
allocation (See the patch "on-demand kernel allocation of bounds tables")
and cleanup (See the patch "cleanup unused bound tables"). Applications
do not strictly need the kernel to manage bounds tables and we expect
some applications to use MPX without taking advantage of this kernel
support. This means the kernel can not simply infer whether an application
needs bounds table management from the MPX registers. The prctl() is an
explicit signal from userspace.
PR_MPX_ENABLE_MANAGEMENT is meant to be a signal from userspace to
require kernel's help in managing bounds tables.
PR_MPX_DISABLE_MANAGEMENT is the opposite, meaning that userspace don't
want kernel's help any more. With PR_MPX_DISABLE_MANAGEMENT, the kernel
won't allocate and free bounds tables even if the CPU supports MPX.
PR_MPX_ENABLE_MANAGEMENT will fetch the base address of the bounds
directory out of a userspace register (bndcfgu) and then cache it into
a new field (->bd_addr) in the 'mm_struct'. PR_MPX_DISABLE_MANAGEMENT
will set "bd_addr" to an invalid address. Using this scheme, we can
use "bd_addr" to determine whether the management of bounds tables in
kernel is enabled.
Also, the only way to access that bndcfgu register is via an xsaves,
which can be expensive. Caching "bd_addr" like this also helps reduce
the cost of those xsaves when doing table cleanup at munmap() time.
Unfortunately, we can not apply this optimization to #BR fault time
because we need an xsave to get the value of BNDSTATUS.
==== Why does the hardware even have these Bounds Tables? ====
MPX only has 4 hardware registers for storing bounds information.
If MPX-enabled code needs more than these 4 registers, it needs to
spill them somewhere. It has two special instructions for this
which allow the bounds to be moved between the bounds registers
and some new "bounds tables".
They are similar conceptually to a page fault and will be raised by
the MPX hardware during both bounds violations or when the tables
are not present. This patch handles those #BR exceptions for
not-present tables by carving the space out of the normal processes
address space (essentially calling the new mmap() interface introduced
earlier in this patch set.) and then pointing the bounds-directory
over to it.
The tables *need* to be accessed and controlled by userspace because
the instructions for moving bounds in and out of them are extremely
frequent. They potentially happen every time a register pointing to
memory is dereferenced. Any direct kernel involvement (like a syscall)
to access the tables would obviously destroy performance.
==== Why not do this in userspace? ====
This patch is obviously doing this allocation in the kernel.
However, MPX does not strictly *require* anything in the kernel.
It can theoretically be done completely from userspace. Here are
a few ways this *could* be done. I don't think any of them are
practical in the real-world, but here they are.
Q: Can virtual space simply be reserved for the bounds tables so
that we never have to allocate them?
A: As noted earlier, these tables are *HUGE*. An X-GB virtual
area needs 4*X GB of virtual space, plus 2GB for the bounds
directory. If we were to preallocate them for the 128TB of
user virtual address space, we would need to reserve 512TB+2GB,
which is larger than the entire virtual address space today.
This means they can not be reserved ahead of time. Also, a
single process's pre-populated bounds directory consumes 2GB
of virtual *AND* physical memory. IOW, it's completely
infeasible to prepopulate bounds directories.
Q: Can we preallocate bounds table space at the same time memory
is allocated which might contain pointers that might eventually
need bounds tables?
A: This would work if we could hook the site of each and every
memory allocation syscall. This can be done for small,
constrained applications. But, it isn't practical at a larger
scale since a given app has no way of controlling how all the
parts of the app might allocate memory (think libraries). The
kernel is really the only place to intercept these calls.
Q: Could a bounds fault be handed to userspace and the tables
allocated there in a signal handler instead of in the kernel?
A: (thanks to tglx) mmap() is not on the list of safe async
handler functions and even if mmap() would work it still
requires locking or nasty tricks to keep track of the
allocation state there.
Having ruled out all of the userspace-only approaches for managing
bounds tables that we could think of, we create them on demand in
the kernel.
Based-on-patch-by: Qiaowei Ren <qiaowei.ren@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: linux-mm@kvack.org
Cc: linux-mips@linux-mips.org
Cc: Dave Hansen <dave@sr71.net>
Link: http://lkml.kernel.org/r/20141114151829.AD4310DE@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2014-11-14 22:18:29 +07:00
|
|
|
#else
|
2015-06-08 01:37:02 +07:00
|
|
|
/*
 * Stub used when CONFIG_X86_INTEL_MPX is not enabled: the kernel
 * provides no bounds-table management, so the enable request is
 * rejected as invalid.
 */
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
|
2015-06-08 01:37:02 +07:00
|
|
|
/*
 * Stub used when CONFIG_X86_INTEL_MPX is not enabled: there is no
 * kernel bounds-table management to turn off, so the disable request
 * is rejected as invalid.
 */
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
|
|
|
|
#endif /* CONFIG_X86_INTEL_MPX */
|
|
|
|
|
2012-11-27 13:32:10 +07:00
|
|
|
extern u16 amd_get_nb_id(int cpu);
|
2015-06-15 15:28:15 +07:00
|
|
|
extern u32 amd_get_nodes_per_socket(void);
|
2009-09-16 16:33:40 +07:00
|
|
|
|
2013-07-25 15:54:32 +07:00
|
|
|
/*
 * Probe the hypervisor CPUID leaf range (0x40000000..0x4000ff00, in
 * steps of 0x100) for a hypervisor whose 12-byte signature matches
 * @sig and which advertises at least @leaves leaves past its base
 * (@leaves == 0 skips the leaf-count check).
 *
 * Returns the matching base leaf, or 0 when no hypervisor matches.
 */
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base;

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		uint32_t eax, regs[3];

		cpuid(base, &eax, &regs[0], &regs[1], &regs[2]);

		/* ebx/ecx/edx together hold the 12-byte vendor signature. */
		if (memcmp(sig, regs, 12) != 0)
			continue;
		if (leaves == 0 || (eax - base) >= leaves)
			return base;
	}

	return 0;
}
|
|
|
|
|
2012-03-29 00:11:12 +07:00
|
|
|
extern unsigned long arch_align_stack(unsigned long sp);
|
|
|
|
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
|
|
|
|
|
|
|
|
void default_idle(void);
|
2013-02-10 11:08:07 +07:00
|
|
|
#ifdef CONFIG_XEN
|
|
|
|
bool xen_set_default_idle(void);
|
|
|
|
#else
|
|
|
|
#define xen_set_default_idle 0
|
|
|
|
#endif
|
2012-03-29 00:11:12 +07:00
|
|
|
|
|
|
|
void stop_this_cpu(void *dummy);
|
2013-05-09 17:02:29 +07:00
|
|
|
void df_debug(struct pt_regs *regs, long error_code);
|
2008-10-23 12:26:29 +07:00
|
|
|
#endif /* _ASM_X86_PROCESSOR_H */
|