Merge remote-tracking branch 's390/guarded-storage' into kvms390/next
commit 7c2b3e0ddc
@@ -1142,16 +1142,17 @@ used by the kernel.

  pids.max
-	A read-write single value file which exists on non-root cgroups. The
-	default is "max".
+	A read-write single value file which exists on non-root
+	cgroups.  The default is "max".

-	Hard limit of number of processes.
+	Hard limit of number of processes.

  pids.current
-	A read-only single value file which exists on all cgroups.
+	A read-only single value file which exists on all cgroups.

-	The number of processes currently in the cgroup and its descendants.
+	The number of processes currently in the cgroup and its
+	descendants.

 Organisational operations are not blocked by cgroup policies, so it is
 possible to have pids.current > pids.max.  This can be done by either

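Not part of the commit: because organisational operations are never blocked, a monitor has to read both files and compare them itself. A minimal userspace sketch in C (the cgroup path below is hypothetical; assumes cgroup2 is mounted at /sys/fs/cgroup):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read the first line of a cgroup2 control file into buf. */
static int read_cg_file(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f || !fgets(buf, len, f)) {
		if (f)
			fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	char cur[32], max[32];

	/* "/sys/fs/cgroup/test" is a hypothetical non-root cgroup. */
	if (read_cg_file("/sys/fs/cgroup/test/pids.current", cur, sizeof(cur)) ||
	    read_cg_file("/sys/fs/cgroup/test/pids.max", max, sizeof(max)))
		return 1;

	/* pids.max may hold the literal string "max" (no limit). */
	if (strcmp(max, "max") != 0 && atol(cur) > atol(max))
		printf("pids.current (%s) exceeds pids.max (%s)\n", cur, max);
	return 0;
}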
@@ -71,6 +71,9 @@
                             For Axon it can be absent, though my current driver
                             doesn't handle phy-address yet so for now, keep
                             0x00ffffff in it.
+ - phy-handle             : Used to describe configurations where a external PHY
+                            is used. Please refer to:
+                            Documentation/devicetree/bindings/net/ethernet.txt
 - rx-fifo-size-gige       : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
                             operations (if absent the value is the same as
                             rx-fifo-size).  For Axon, either absent or 2048.
@@ -81,8 +84,22 @@
                             offload, phandle of the TAH device node.
 - tah-channel             : 1 cell, optional. If appropriate, channel used on the
                             TAH engine.
+ - fixed-link             : Fixed-link subnode describing a link to a non-MDIO
+                            managed entity. See
+                            Documentation/devicetree/bindings/net/fixed-link.txt
+                            for details.
+ - mdio subnode           : When the EMAC has a phy connected to its local
+                            mdio, which us supported by the kernel's network
+                            PHY library in drivers/net/phy, there must be device
+                            tree subnode with the following required properties:
+                               - #address-cells: Must be <1>.
+                               - #size-cells: Must be <0>.

-    Example:
+    For PHY definitions: Please refer to
+    Documentation/devicetree/bindings/net/phy.txt and
+    Documentation/devicetree/bindings/net/ethernet.txt
+
+    Examples:

 	EMAC0: ethernet@40000800 {
 		device_type = "network";
@@ -104,6 +121,48 @@
 		zmii-channel = <0>;
 	};

+	EMAC1: ethernet@ef600c00 {
+		device_type = "network";
+		compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+		interrupt-parent = <&EMAC1>;
+		interrupts = <0 1>;
+		#interrupt-cells = <1>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+				 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+		reg = <0xef600c00 0x000000c4>;
+		local-mac-address = [000000000000]; /* Filled in by U-Boot */
+		mal-device = <&MAL0>;
+		mal-tx-channel = <0>;
+		mal-rx-channel = <0>;
+		cell-index = <0>;
+		max-frame-size = <9000>;
+		rx-fifo-size = <16384>;
+		tx-fifo-size = <2048>;
+		fifo-entry-size = <10>;
+		phy-mode = "rgmii";
+		phy-handle = <&phy0>;
+		phy-map = <0x00000000>;
+		rgmii-device = <&RGMII0>;
+		rgmii-channel = <0>;
+		tah-device = <&TAH0>;
+		tah-channel = <0>;
+		has-inverted-stacr-oc;
+		has-new-stacr-staopc;
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy0: ethernet-phy@0 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <0>;
+			};
+		};
+	};
+

 ii) McMAL node

 Required properties:
@@ -145,4 +204,3 @@
 - revision                : as provided by the RGMII new version register if
                             available.
                             For Axon: 0x0000012a
-

@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
 	FALSE (router)

 forwarding - BOOLEAN
-	Enable IP forwarding on this interface.
+	Enable IP forwarding on this interface.  This controls whether packets
+	received _on_ this interface can be forwarded.

 mc_forwarding - BOOLEAN
 	Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE

Makefile | 2 +-
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote

 # *DOCUMENTATION*

@@ -411,3 +411,4 @@
 394	common	pkey_mprotect		sys_pkey_mprotect
 395	common	pkey_alloc		sys_pkey_alloc
 396	common	pkey_free		sys_pkey_free
+397	common	statx			sys_statx

@@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC

+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu

 menu "Power management options"

@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-		!cpus_have_cap(ARM64_HAS_PAN);
+		!cpus_have_const_cap(ARM64_HAS_PAN);
 }

 #endif /* __ASSEMBLY__ */

@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }

 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU

@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 	return 0;
 }

-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-				       unsigned long val, void *data)
-{
-	return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p, *cur_kprobe;

@@ -162,7 +162,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

 	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-			 pfn_to_nid(virt_to_pfn(_text)));
+			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));

 	/*
 	 * vmemmap_populate() has populated the shadow region that covers the

@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 	return val;
 }

-#define xchg(ptr, with) \
-	((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with) \
+	({ \
+		(__typeof__(*(ptr))) __xchg((unsigned long)(with), \
+					    (ptr), \
+					    sizeof(*(ptr))); \
+	})

 #endif /* __ASM_OPENRISC_CMPXCHG_H */

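Not part of the commit: the point of the ({ ... }) rewrite is that xchg() is used for its value, and a GCC statement expression yields a properly typed result in any expression context. A self-contained sketch of the same shape (the __xchg below is a plain-C stand-in, not the OpenRISC implementation, which is atomic):

#include <stdio.h>

/* Stand-in for the arch's __xchg: store 'with' in *ptr, return the old value. */
static unsigned long __xchg(unsigned long with, volatile void *ptr, int size)
{
	unsigned long old = *(volatile unsigned long *)ptr;

	(void)size;	/* a real port dispatches on the operand size */
	*(volatile unsigned long *)ptr = with;
	return old;
}

/* Same shape as the patched macro: a statement expression with a value. */
#define xchg(ptr, with)							\
	({								\
		(__typeof__(*(ptr))) __xchg((unsigned long)(with),	\
					    (ptr),			\
					    sizeof(*(ptr)));		\
	})

int main(void)
{
	volatile unsigned long lock = 0;

	if (xchg(&lock, 1) == 0)	/* usable directly inside an expression */
		printf("acquired, lock=%lu\n", lock);
	return 0;
}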
@@ -211,7 +211,7 @@ do {									\
 	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
 	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
 	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
-	case 8: __get_user_asm2(x, ptr, retval);			\
+	case 8: __get_user_asm2(x, ptr, retval); break;			\
 	default: (x) = __get_user_bad();				\
 	}								\
 } while (0)

@@ -30,6 +30,7 @@
 #include <asm/hardirq.h>
 #include <asm/delay.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>

 #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)

@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
 DECLARE_EXPORT(__ashrdi3);
 DECLARE_EXPORT(__ashldi3);
 DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);

 EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(memset);

@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
 }

 void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);

 /*
  * When a process does an "exec", machine state like FPU and debug

@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)

 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
-
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);

 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()

@@ -32,7 +32,8 @@
  * that put_user is the same as __put_user, etc.
  */

-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size)	\
+	( (uaddr) == (uaddr) )

 #define put_user __put_user
 #define get_user __get_user

@@ -362,8 +362,9 @@
 #define __NR_copy_file_range	(__NR_Linux + 346)
 #define __NR_preadv2		(__NR_Linux + 347)
 #define __NR_pwritev2		(__NR_Linux + 348)
+#define __NR_statx		(__NR_Linux + 349)

-#define __NR_Linux_syscalls	(__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls	(__NR_statx + 1)


 #define __IGNORE_select		/* newselect */

@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);

@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 			 */
 			*loc = fsel(val, addend);
 			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
 		case R_PARISC_DPREL21L:
 			/* left 21 bit of relative address */
 			val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 			 */
 			*loc = fsel(val, addend);
 			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
 		case R_PARISC_FPTR64:
 			/* 64-bit function address */
 			if(in_local(me, (void *)(val + addend))) {

@@ -39,7 +39,7 @@
  *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
  *  in various PDC revisions.  The code is much more maintainable
  *  and reliable this way vs having to debug on every version of PDC
- *  on every box. 
+ *  on every box.
  */

 #include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
 static int perf_release(struct inode *inode, struct file *file);
 static int perf_open(struct inode *inode, struct file *file);
 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-	loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos);
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static void perf_start_counters(void);
 static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
 /*
  * configure:
  *
- * Configure the cpu with a given data image.  First turn off the counters, 
+ * Configure the cpu with a given data image.  First turn off the counters,
  * then download the image, then turn the counters back on.
  */
 static int perf_config(uint32_t *image_ptr)

@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
 	error = perf_stop_counters(raddr);
 	if (error != 0) {
 		printk("perf_config: perf_stop_counters = %ld\n", error);
-		return -EINVAL; 
+		return -EINVAL;
 	}

 printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
 	error = perf_write_image((uint64_t *)image_ptr);
 	if (error != 0) {
 		printk("perf_config: DOWNLOAD = %ld\n", error);
-		return -EINVAL; 
+		return -EINVAL;
 	}

 printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
 }

 /*
- * Open the device and initialize all of its memory.  The device is only 
+ * Open the device and initialize all of its memory.  The device is only
  * opened once, but can be "queried" by multiple processes that know its
  * file descriptor.
  */

@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
  * called on the processor that the download should happen
  * on.
  */
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-	loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
 {
 	size_t image_size;
 	uint32_t image_type;
 	uint32_t interface_type;
 	uint32_t test;

-	if (perf_processor_interface == ONYX_INTF) 
+	if (perf_processor_interface == ONYX_INTF)
 		image_size = PCXU_IMAGE_SIZE;
-	else if (perf_processor_interface == CUDA_INTF) 
+	else if (perf_processor_interface == CUDA_INTF)
 		image_size = PCXW_IMAGE_SIZE;
-	else 
+	else
 		return -EFAULT;

 	if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun

 	/* First check the machine type is correct for
 	   the requested image */
-	if (((perf_processor_interface == CUDA_INTF) &&
-		       (interface_type != CUDA_INTF)) ||
-	    ((perf_processor_interface == ONYX_INTF) &&
-	               (interface_type != ONYX_INTF))) 
+	if (((perf_processor_interface == CUDA_INTF) &&
+			(interface_type != CUDA_INTF)) ||
+		((perf_processor_interface == ONYX_INTF) &&
+			(interface_type != ONYX_INTF)))
 		return -EINVAL;

 	/* Next check to make sure the requested image
 	   is valid */
-	if (((interface_type == CUDA_INTF) && 
+	if (((interface_type == CUDA_INTF) &&
 		       (test >= MAX_CUDA_IMAGES)) ||
-	    ((interface_type == ONYX_INTF) && 
-		       (test >= MAX_ONYX_IMAGES))) 
+	    ((interface_type == ONYX_INTF) &&
+		       (test >= MAX_ONYX_IMAGES)))
 		return -EINVAL;

 	/* Copy the image into the processor */
-	if (interface_type == CUDA_INTF) 
+	if (interface_type == CUDA_INTF)
 		return perf_config(cuda_images[test]);
 	else
 		return perf_config(onyx_images[test]);

@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 static void perf_patch_images(void)
 {
 #if 0 /* FIXME!! */
-/* 
+/*
  * NOTE: this routine is VERY specific to the current TLB image.
  * If the image is changed, this routine might also need to be changed.
  */
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
 	extern void $i_dtlb_miss_2_0();
 	extern void PA2_0_iva();

-	/* 
+	/*
 	 * We can only use the lower 32-bits, the upper 32-bits should be 0
-	 * anyway given this is in the kernel 
+	 * anyway given this is in the kernel
 	 */
 	uint32_t itlb_addr  = (uint32_t)&($i_itlb_miss_2_0);
 	uint32_t dtlb_addr  = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)

 	if (perf_processor_interface == ONYX_INTF) {
 		/* clear last 2 bytes */
-		onyx_images[TLBMISS][15] &= 0xffffff00;  
+		onyx_images[TLBMISS][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
 		onyx_images[TLBMISS][17] = itlb_addr;

 		/* clear last 2 bytes */
-		onyx_images[TLBHANDMISS][15] &= 0xffffff00;  
+		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
 		onyx_images[TLBHANDMISS][17] = itlb_addr;

 		/* clear last 2 bytes */
-		onyx_images[BIG_CPI][15] &= 0xffffff00;  
+		onyx_images[BIG_CPI][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;

@@ -404,24 +404,24 @@ static void perf_patch_images(void)

 	} else if (perf_processor_interface == CUDA_INTF) {
 		/* Cuda interface */
-		cuda_images[TLBMISS][16] =  
+		cuda_images[TLBMISS][16] =
 		    (cuda_images[TLBMISS][16]&0xffff0000) |
 		    ((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[TLBMISS][17] = 
+		cuda_images[TLBMISS][17] =
 		    ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;

-		cuda_images[TLBHANDMISS][16] = 
+		cuda_images[TLBHANDMISS][16] =
 		    (cuda_images[TLBHANDMISS][16]&0xffff0000) |
 		    ((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[TLBHANDMISS][17] = 
+		cuda_images[TLBHANDMISS][17] =
 		    ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;

-		cuda_images[BIG_CPI][16] = 
+		cuda_images[BIG_CPI][16] =
 		    (cuda_images[BIG_CPI][16]&0xffff0000) |
 		    ((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[BIG_CPI][17] = 
+		cuda_images[BIG_CPI][17] =
 		    ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
 	} else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)

 /*
  * ioctl routine
- * All routines effect the processor that they are executed on.  Thus you 
+ * All routines effect the processor that they are executed on.  Thus you
  * must be running on the processor that you wish to change.
  */

@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			}

 			/* copy out the Counters */
-			if (copy_to_user((void __user *)arg, raddr, 
+			if (copy_to_user((void __user *)arg, raddr,
 					sizeof (raddr)) != 0) {
 				error =  -EFAULT;
 				break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
 	.open = perf_open,
 	.release = perf_release
 };
-	
+
 static struct miscdevice perf_dev = {
 	MISC_DYNAMIC_MINOR,
 	PA_PERF_DEV,

@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		/* OR sticky2 (bit 1496) to counter2 bit 32 */
 		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
 		raddr[2] = (uint32_t)tmp64;
-		
+
 		/* Counter3 is bits 1497 to 1528 */
 		tmp64 =  (userbuf[23] >> 7) & 0x00000000ffffffff;
 		/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		userbuf[22] = 0;
 		userbuf[23] = 0;

-		/* 
+		/*
 		 * Write back the zeroed bytes + the image given
 		 * the read was destructive.
 		 */
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
 	} else {

 		/*
-		 * Read RDR-15 which contains the counters and sticky bits 
+		 * Read RDR-15 which contains the counters and sticky bits
 		 */
 		if (!perf_rdr_read_ubuf(15, userbuf)) {
 			return -13;
 		}

-		/* 
+		/*
 		 * Clear out the counters
 		 */
 		perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
 		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
 	}
-	
+
 	return 0;
 }

@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
 	i = tentry->num_words;
 	while (i--) {
 		buffer[i] = 0;
-	}	
+	}

 	/* Check for bits an even number of 64 */
 	if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
 	}

 	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+	if (!runway) {
+		pr_err("perf_write_image: ioremap failed!\n");
+		return -ENOMEM;
+	}

 	/* Merge intrigue bits into Runway STATUS 0 */
 	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
-	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), 
+	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
 			runway + RUNWAY_STATUS);
-	
+
 	/* Write RUNWAY DEBUG registers */
 	for (i = 0; i < 8; i++) {
 		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
 	}

-	return 0; 
+	return 0;
 }

 /*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
 			perf_rdr_shift_out_U(rdr_num, buffer[i]);
 		} else {
 			perf_rdr_shift_out_W(rdr_num, buffer[i]);
-		}	
+		}
 	}
 printk("perf_rdr_write done\n");
 }

@@ -142,6 +142,8 @@ void machine_power_off(void)

 	printk(KERN_EMERG "System shut down completed.\n"
 	       "Please power this system off now.");
+
+	for (;;);
 }

 void (*pm_power_off)(void) = machine_power_off;

@@ -444,6 +444,7 @@
 	ENTRY_SAME(copy_file_range)
 	ENTRY_COMP(preadv2)
 	ENTRY_COMP(pwritev2)
+	ENTRY_SAME(statx)


 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))

@@ -68,6 +68,7 @@ SECTIONS
 	}

 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+	. = ALIGN(256);
 	.got :
 	{
 		__toc_start = .;

@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
 {
 	u32 *key = crypto_tfm_ctx(tfm);

-	*key = 0;
+	*key = ~0;

 	return 0;
 }

@@ -51,6 +51,10 @@
 #define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
 #define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit)			\
+	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
 #include <asm/barrier.h>

 /* Macro for generating the ***_bits() functions */

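Not part of the commit: IBM numbers bits from the most significant end, so PPC_BITEXTRACT moves one MSB-numbered bit into an ordinary LSB-numbered position. A self-contained illustration in C; the 63 - bit definition of PPC_BITLSHIFT is assumed from the surrounding 64-bit header, and the macro bodies are copied from the hunks here:

#include <stdio.h>
#include <stdint.h>

/* Assumed from asm/bitops.h on 64-bit: IBM bit 0 is the MSB. */
#define PPC_BITLSHIFT(be)	(63 - (be))

/* As added by the hunk above. */
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

/* The P9 ifetch decoder built on it (from the mce.h hunk below). */
#define P9_SRR1_MC_IFETCH(srr1)	(		\
	PPC_BITEXTRACT(srr1, 45, 0) |		\
	PPC_BITEXTRACT(srr1, 44, 1) |		\
	PPC_BITEXTRACT(srr1, 43, 2) |		\
	PPC_BITEXTRACT(srr1, 36, 3) )

int main(void)
{
	/* Set IBM bits 43 and 45 of SRR1: code 0b0101. */
	uint64_t srr1 = (1UL << PPC_BITLSHIFT(43)) | (1UL << PPC_BITLSHIFT(45));

	/* Prints 5, i.e. P9_SRR1_MC_IFETCH_TLB_MULTIHIT in the table below. */
	printf("ifetch code = %llu\n",
	       (unsigned long long)P9_SRR1_MC_IFETCH(srr1));
	return 0;
}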
@@ -66,6 +66,55 @@

 #define P8_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_SLB_ERRORS | \
 					 P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1)	(((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1)	(	\
+	PPC_BITEXTRACT(srr1, 45, 0) |	\
+	PPC_BITEXTRACT(srr1, 44, 1) |	\
+	PPC_BITEXTRACT(srr1, 43, 2) |	\
+	PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE				1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY			2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT			3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT			4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT			5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD			6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT			8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT	9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA				11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK			12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE		13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT	14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN		15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE					(PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK			(PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT			(PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT		(PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT			(PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB			(PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE				(PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB			(PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB			(PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD				(PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK			(PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN		(PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN				(PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS		(P9_DSISR_MC_ERAT_MULTIHIT | \
+					 P9_DSISR_MC_SLB_PARITY_MFSLB | \
+					 P9_DSISR_MC_SLB_MULTIHIT_MFSLB)

 enum MCE_Version {
 	MCE_V1 = 1,
 };
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
 	MCE_ERROR_TYPE_SLB = 2,
 	MCE_ERROR_TYPE_ERAT = 3,
 	MCE_ERROR_TYPE_TLB = 4,
+	MCE_ERROR_TYPE_USER = 5,
+	MCE_ERROR_TYPE_RA = 6,
+	MCE_ERROR_TYPE_LINK = 7,
 };

 enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
 	MCE_TLB_ERROR_MULTIHIT = 2,
 };

+enum MCE_UserErrorType {
+	MCE_USER_ERROR_INDETERMINATE = 0,
+	MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+	MCE_RA_ERROR_INDETERMINATE = 0,
+	MCE_RA_ERROR_IFETCH = 1,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
+	MCE_RA_ERROR_LOAD = 4,
+	MCE_RA_ERROR_STORE = 5,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
+	MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+};
+
+enum MCE_LinkErrorType {
+	MCE_LINK_ERROR_INDETERMINATE = 0,
+	MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+	MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+	MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
 struct machine_check_event {
 	enum MCE_Version	version:8;	/* 0x00 */
 	uint8_t			in_use;		/* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
 			uint64_t	effective_address;
 			uint8_t		reserved_2[16];
 		} tlb_error;
+
+		struct {
+			enum MCE_UserErrorType user_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} user_error;
+
+		struct {
+			enum MCE_RaErrorType ra_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} ra_error;
+
+		struct {
+			enum MCE_LinkErrorType link_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} link_error;
 	} u;
 };

@@ -176,8 +278,12 @@ struct mce_error_info {
 		enum MCE_SlbErrorType slb_error_type:8;
 		enum MCE_EratErrorType erat_error_type:8;
 		enum MCE_TlbErrorType tlb_error_type:8;
+		enum MCE_UserErrorType user_error_type:8;
+		enum MCE_RaErrorType ra_error_type:8;
+		enum MCE_LinkErrorType link_error_type:8;
 	} u;
+	uint8_t		reserved[2];
 	enum MCE_Severity	severity:8;
 	enum MCE_Initiator	initiator:8;
 };

 #define MAX_MC_EVT	100

@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
 COMPAT_SYS_SPU(preadv2)
 COMPAT_SYS_SPU(pwritev2)
 SYSCALL(kexec_file_load)
+SYSCALL(statx)

@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>


-#define NR_syscalls		383
+#define NR_syscalls		384

 #define __NR__exit __NR_exit

@@ -393,5 +393,6 @@
 #define __NR_preadv2		380
 #define __NR_pwritev2		381
 #define __NR_kexec_file_load	382
+#define __NR_statx		383

 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
 extern void __flush_tlb_power9(unsigned int action);
 extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
 extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
 #endif /* CONFIG_PPC64 */
 #if defined(CONFIG_E500)
 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_power9,
 		.cpu_restore		= __restore_cpu_power9,
 		.flush_tlb		= __flush_tlb_power9,
+		.machine_check_early	= __machine_check_early_realmode_p9,
 		.platform		= "power9",
 	},
 	{	/* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_power9,
 		.cpu_restore		= __restore_cpu_power9,
 		.flush_tlb		= __flush_tlb_power9,
+		.machine_check_early	= __machine_check_early_realmode_p9,
 		.platform		= "power9",
 	},
 	{	/* Cell Broadband Engine */

@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
 	case MCE_ERROR_TYPE_TLB:
 		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
 		break;
+	case MCE_ERROR_TYPE_USER:
+		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
+		break;
+	case MCE_ERROR_TYPE_RA:
+		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
+		break;
 	case MCE_ERROR_TYPE_UNKNOWN:
 	default:
 		break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
 	mce->gpr3 = regs->gpr[3];
 	mce->in_use = 1;

-	mce->initiator = MCE_INITIATOR_CPU;
 	/* Mark it recovered if we have handled it and MSR(RI=1). */
 	if (handled && (regs->msr & MSR_RI))
 		mce->disposition = MCE_DISPOSITION_RECOVERED;
 	else
 		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
-	mce->severity = MCE_SEV_ERROR_SYNC;
+
+	mce->initiator = mce_err->initiator;
+	mce->severity = mce_err->severity;

 	/*
 	 * Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
 	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
 		mce->u.erat_error.effective_address_provided = true;
 		mce->u.erat_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
+		mce->u.user_error.effective_address_provided = true;
+		mce->u.user_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
+		mce->u.ra_error.effective_address_provided = true;
+		mce->u.ra_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
+		mce->u.link_error.effective_address_provided = true;
+		mce->u.link_error.effective_address = addr;
 	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
 		mce->u.ue_error.effective_address_provided = true;
 		mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
 		"Parity",
 		"Multihit",
 	};
+	static const char *mc_user_types[] = {
+		"Indeterminate",
+		"tlbie(l) invalid",
+	};
+	static const char *mc_ra_types[] = {
+		"Indeterminate",
+		"Instruction fetch (bad)",
+		"Page table walk ifetch (bad)",
+		"Page table walk ifetch (foreign)",
+		"Load (bad)",
+		"Store (bad)",
+		"Page table walk Load/Store (bad)",
+		"Page table walk Load/Store (foreign)",
+		"Load/Store (foreign)",
+	};
+	static const char *mc_link_types[] = {
+		"Indeterminate",
+		"Instruction fetch (timeout)",
+		"Page table walk ifetch (timeout)",
+		"Load (timeout)",
+		"Store (timeout)",
+		"Page table walk Load/Store (timeout)",
+	};

 	/* Print things out */
 	if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
 			printk("%s    Effective address: %016llx\n",
 			       level, evt->u.tlb_error.effective_address);
 		break;
+	case MCE_ERROR_TYPE_USER:
+		subtype = evt->u.user_error.user_error_type <
+			ARRAY_SIZE(mc_user_types) ?
+			mc_user_types[evt->u.user_error.user_error_type]
+			: "Unknown";
+		printk("%s  Error type: User [%s]\n", level, subtype);
+		if (evt->u.user_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.user_error.effective_address);
+		break;
+	case MCE_ERROR_TYPE_RA:
+		subtype = evt->u.ra_error.ra_error_type <
+			ARRAY_SIZE(mc_ra_types) ?
+			mc_ra_types[evt->u.ra_error.ra_error_type]
+			: "Unknown";
+		printk("%s  Error type: Real address [%s]\n", level, subtype);
+		if (evt->u.ra_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.ra_error.effective_address);
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		subtype = evt->u.link_error.link_error_type <
+			ARRAY_SIZE(mc_link_types) ?
+			mc_link_types[evt->u.link_error.link_error_type]
+			: "Unknown";
+		printk("%s  Error type: Link [%s]\n", level, subtype);
+		if (evt->u.link_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.link_error.effective_address);
+		break;
 	default:
 	case MCE_ERROR_TYPE_UNKNOWN:
 		printk("%s  Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
 		if (evt->u.tlb_error.effective_address_provided)
 			return evt->u.tlb_error.effective_address;
 		break;
+	case MCE_ERROR_TYPE_USER:
+		if (evt->u.user_error.effective_address_provided)
+			return evt->u.user_error.effective_address;
+		break;
+	case MCE_ERROR_TYPE_RA:
+		if (evt->u.ra_error.effective_address_provided)
+			return evt->u.ra_error.effective_address;
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		if (evt->u.link_error.effective_address_provided)
+			return evt->u.link_error.effective_address;
+		break;
 	default:
 	case MCE_ERROR_TYPE_UNKNOWN:
 		break;

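Not part of the commit: the print path guards every table lookup with `index < ARRAY_SIZE(table) ? table[index] : "Unknown"`, so a future hardware error code can never index past the string array. The same defensive idiom in a standalone C form (illustrative only):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *mc_user_types[] = {
	"Indeterminate",
	"tlbie(l) invalid",
};

/* Map an error code to a name without trusting the code's range. */
static const char *user_type_name(unsigned int type)
{
	return type < ARRAY_SIZE(mc_user_types) ?
		mc_user_types[type] : "Unknown";
}

int main(void)
{
	printf("%s\n", user_type_name(1));	/* tlbie(l) invalid */
	printf("%s\n", user_type_name(9));	/* Unknown */
	return 0;
}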
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
 }
 #endif

+static void flush_erat(void)
+{
+	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+}
+
+#define MCE_FLUSH_SLB 1
+#define MCE_FLUSH_TLB 2
+#define MCE_FLUSH_ERAT 3
+
+static int mce_flush(int what)
+{
+#ifdef CONFIG_PPC_STD_MMU_64
+	if (what == MCE_FLUSH_SLB) {
+		flush_and_reload_slb();
+		return 1;
+	}
+#endif
+	if (what == MCE_FLUSH_ERAT) {
+		flush_erat();
+		return 1;
+	}
+	if (what == MCE_FLUSH_TLB) {
+		if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
+{
+	if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
+		dsisr &= ~slb;
+	if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
+		dsisr &= ~erat;
+	if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
+		dsisr &= ~tlb;
+	/* Any other errors we don't understand? */
+	if (dsisr)
+		return 0;
+	return 1;
+}
+
 static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
 {
 	long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
 	long handled = 1;
 	struct mce_error_info mce_error_info = { 0 };

+	mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+	mce_error_info.initiator = MCE_INITIATOR_CPU;
+
 	srr1 = regs->msr;
 	nip = regs->nip;

@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
 	long handled = 1;
 	struct mce_error_info mce_error_info = { 0 };

+	mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+	mce_error_info.initiator = MCE_INITIATOR_CPU;
+
 	srr1 = regs->msr;
 	nip = regs->nip;

@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
 	save_mce_event(regs, handled, &mce_error_info, nip, addr);
 	return handled;
 }
+
+static int mce_handle_derror_p9(struct pt_regs *regs)
+{
+	uint64_t dsisr = regs->dsisr;
+
+	return mce_handle_flush_derrors(dsisr,
+			P9_DSISR_MC_SLB_PARITY_MFSLB |
+			P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
+
+			P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
+
+			P9_DSISR_MC_ERAT_MULTIHIT);
+}
+
+static int mce_handle_ierror_p9(struct pt_regs *regs)
+{
+	uint64_t srr1 = regs->msr;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_SLB_PARITY:
+	case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+		return mce_flush(MCE_FLUSH_SLB);
+	case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+		return mce_flush(MCE_FLUSH_TLB);
+	case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+		return mce_flush(MCE_FLUSH_ERAT);
+	default:
+		return 0;
+	}
+}
+
+static void mce_get_derror_p9(struct pt_regs *regs,
+		struct mce_error_info *mce_err, uint64_t *addr)
+{
+	uint64_t dsisr = regs->dsisr;
+
+	mce_err->severity = MCE_SEV_ERROR_SYNC;
+	mce_err->initiator = MCE_INITIATOR_CPU;
+
+	if (dsisr & P9_DSISR_MC_USER_TLBIE)
+		*addr = regs->nip;
+	else
+		*addr = regs->dar;
+
+	if (dsisr & P9_DSISR_MC_UE) {
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
+	} else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
+	} else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
+		mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+		mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_TLB;
+		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
+		mce_err->error_type = MCE_ERROR_TYPE_USER;
+		mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
+	} else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+	} else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_RA_LOAD) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
+	} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+	} else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+	}
+}
+
+static void mce_get_ierror_p9(struct pt_regs *regs,
+		struct mce_error_info *mce_err, uint64_t *addr)
+{
+	uint64_t srr1 = regs->msr;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+	case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+		mce_err->severity = MCE_SEV_FATAL;
+		break;
+	default:
+		mce_err->severity = MCE_SEV_ERROR_SYNC;
+		break;
+	}
+
+	mce_err->initiator = MCE_INITIATOR_CPU;
+
+	*addr = regs->nip;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_UE:
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_SLB_PARITY:
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+		break;
+	case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+		mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_TLB;
+		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_RA:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
+		break;
+	default:
+		break;
+	}
+}
+
+long __machine_check_early_realmode_p9(struct pt_regs *regs)
+{
+	uint64_t nip, addr;
+	long handled;
+	struct mce_error_info mce_error_info = { 0 };
+
+	nip = regs->nip;
+
+	if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
+		handled = mce_handle_derror_p9(regs);
+		mce_get_derror_p9(regs, &mce_error_info, &addr);
+	} else {
+		handled = mce_handle_ierror_p9(regs);
+		mce_get_ierror_p9(regs, &mce_error_info, &addr);
+	}
+
+	/* Handle UE error. */
+	if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+		handled = mce_handle_ue_error(regs);
+
+	save_mce_event(regs, handled, &mce_error_info, nip, addr);
+	return handled;
+}

@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 		sdsync = POWER7P_MMCRA_SDAR_VALID;
 	else if (ppmu->flags & PPMU_ALT_SIPR)
 		sdsync = POWER6_MMCRA_SDSYNC;
+	else if (ppmu->flags & PPMU_NO_SIAR)
+		sdsync = MMCRA_SAMPLE_ENABLE;
 	else
 		sdsync = MMCRA_SDSYNC;

@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
 	return !(event & ~valid_mask);
 }

-static u64 mmcra_sdar_mode(u64 event)
+static inline bool is_event_marked(u64 event)
 {
-	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
-		return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+	if (event & EVENT_IS_MARKED)
+		return true;

-	return MMCRA_SDAR_MODE_TLB;
+	return false;
+}
+
+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
+{
+	/*
+	 * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in
+	 * continous sampling mode.
+	 *
+	 * Incase of Power8:
+	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
+	 * mode and will be un-changed when setting MMCRA[63] (Marked events).
+	 *
+	 * Incase of Power9:
+	 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
+	 *               or if group already have any marked events.
+	 * Non-Marked events (for DD1):
+	 *               MMCRA[SDAR_MODE] will be set to 0b01
+	 * For rest
+	 *               MMCRA[SDAR_MODE] will be set from event code.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
+			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
+		else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+			*mmcra |=  p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+		else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			*mmcra |= MMCRA_SDAR_MODE_TLB;
+	} else
+		*mmcra |= MMCRA_SDAR_MODE_TLB;
 }

 static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		value |= CNST_L1_QUAL_VAL(cache);
 	}

-	if (event & EVENT_IS_MARKED) {
+	if (is_event_marked(event)) {
 		mask  |= CNST_SAMPLE_MASK;
 		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
 	}
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 		}

 		/* In continuous sampling mode, update SDAR on TLB miss */
-		mmcra |= mmcra_sdar_mode(event[i]);
+		mmcra_sdar_mode(event[i], &mmcra);

 		if (event[i] & EVENT_IS_L1) {
 			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
 		}

-		if (event[i] & EVENT_IS_MARKED) {
+		if (is_event_marked(event[i])) {
 			mmcra |= MMCRA_SAMPLE_ENABLE;

 			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;

@@ -246,6 +246,7 @@
 #define MMCRA_THR_CMP_SHIFT		32
 #define MMCRA_SDAR_MODE_SHIFT		42
 #define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_NO_UPDATES	~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
 #define MMCRA_IFM_SHIFT			30

 /* MMCR1 Threshold Compare bit constant for power9 */

@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
 					struct machine_check_event *evt)
 {
 	int recovered = 0;
-	uint64_t ea = get_mce_fault_addr(evt);

 	if (!(regs->msr & MSR_RI)) {
 		/* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
 	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
 		/* Platform corrected itself */
 		recovered = 1;
-	} else if (ea && !is_kernel_addr(ea)) {
+	} else if (evt->severity == MCE_SEV_FATAL) {
+		/* Fatal machine check */
+		pr_err("Machine check interrupt is fatal\n");
+		recovered = 0;
+	} else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
+			(user_mode(regs) && !is_global_init(current))) {
 		/*
-		 * Faulting address is not in kernel text. We should be fine.
-		 * We need to find which process uses this address.
-		 * For now, kill the task if we have received exception when
-		 * in userspace.
-		 *
-		 * TODO: Queue up this address for hwpoisioning later.
-		 */
-		if (user_mode(regs) && !is_global_init(current)) {
-			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
-			recovered = 1;
-		} else
-			recovered = 0;
-	} else if (user_mode(regs) && !is_global_init(current) &&
-		   evt->severity == MCE_SEV_ERROR_SYNC) {
-		/*
 		 * If we have received a synchronous error when in userspace
 		 * kill the task.
 		 */
 		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
 		recovered = 1;
 	}

@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 }

 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
-				   struct pci_bus *bus)
+				   struct pci_bus *bus,
+				   bool add_to_group)
 {
 	struct pci_dev *dev;

 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
 		set_dma_offset(&dev->dev, pe->tce_bypass_base);
-		iommu_add_device(&dev->dev);
+		if (add_to_group)
+			iommu_add_device(&dev->dev);

 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
-			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+			pnv_ioda_setup_bus_dma(pe, dev->subordinate,
+					add_to_group);
 	}
 }

@@ -2191,7 +2194,7 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
 		set_iommu_table_base(&pe->pdev->dev, tbl);
 		iommu_add_device(&pe->pdev->dev);
 	} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);

 	return;
  fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)

 	pnv_pci_ioda2_set_bypass(pe, false);
 	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+	if (pe->pbus)
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
 	pnv_ioda2_table_free(tbl);
 }

@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
 						table_group);

 	pnv_pci_ioda2_setup_default_config(pe);
+	if (pe->pbus)
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
 }

 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);

+	if ((level_shift - 3) * levels + page_shift >= 60)
+		return -EINVAL;
+
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
 			levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 	if (pe->flags & PNV_IODA_PE_DEV)
 		iommu_add_device(&pe->pdev->dev);
 	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
 }

 #ifdef CONFIG_PCI_MSI

@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
 	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
 	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
 	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
-	mmu_hash_ops.resize_hpt		 = pseries_lpar_resize_hpt;
+
+	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
 }

 void radix_init_pseries(void)

@@ -105,6 +105,7 @@
 #define HWCAP_S390_VXRS		2048
 #define HWCAP_S390_VXRS_BCD	4096
 #define HWCAP_S390_VXRS_EXT	8192
+#define HWCAP_S390_GS		16384

 /* Internal bits, not exposed via elf */
 #define HWCAP_INT_SIE		1UL

@@ -157,8 +157,8 @@ struct lowcore {
 	__u64	stfle_fac_list[32];		/* 0x0f00 */
 	__u8	pad_0x1000[0x11b0-0x1000];	/* 0x1000 */

-	/* Pointer to vector register save area */
-	__u64	vector_save_area_addr;		/* 0x11b0 */
+	/* Pointer to the machine check extended save area */
+	__u64	mcesad;				/* 0x11b0 */

 	/* 64 bit extparam used for pfault/diag 250: defined by architecture */
 	__u64	ext_params2;			/* 0x11B8 */
@@ -182,10 +182,7 @@ struct lowcore {

 	/* Transaction abort diagnostic block */
 	__u8	pgm_tdb[256];			/* 0x1800 */
-	__u8	pad_0x1900[0x1c00-0x1900];	/* 0x1900 */
-
-	/* Software defined save area for vector registers */
-	__u8	vector_save_area[1024];		/* 0x1c00 */
+	__u8	pad_0x1900[0x2000-0x1900];	/* 0x1900 */
 } __packed;

 #define S390_lowcore (*((struct lowcore *) 0))

@@ -58,7 +58,9 @@ union mci {
 		u64 ie	:  1;	/* 32 indirect storage error */
 		u64 ar	:  1;	/* 33 access register validity */
 		u64 da	:  1;	/* 34 delayed access exception */
-		u64	:  7;	/* 35-41 */
+		u64	:  1;	/* 35 */
+		u64 gs	:  1;	/* 36 guarded storage registers */
+		u64	:  5;	/* 37-41 */
 		u64 pr	:  1;	/* 42 tod programmable register validity */
 		u64 fc	:  1;	/* 43 fp control register validity */
 		u64 ap	:  1;	/* 44 ancillary report */
@@ -69,6 +71,14 @@ union mci {
 	};
 };

+#define MCESA_ORIGIN_MASK	(~0x3ffUL)
+#define MCESA_LC_MASK		(0xfUL)
+
+struct mcesa {
+	u8 vector_save_area[1024];
+	u8 guarded_storage_save_area[32];
+};
+
 struct pt_regs;

 extern void s390_handle_mcck(void);

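Not part of the commit: with this change the lowcore mcesad word carries both the save-area address and flag bits, since an aligned origin leaves the low 10 bits free; MCESA_LC_MASK suggests the lowest 4 hold a length code. A hedged, self-contained C sketch of splitting the two (the helper name is hypothetical):

#include <stdint.h>

typedef uint8_t u8;
typedef uint64_t u64;

#define MCESA_ORIGIN_MASK	(~0x3ffUL)
#define MCESA_LC_MASK		(0xfUL)

struct mcesa {
	u8 vector_save_area[1024];
	u8 guarded_storage_save_area[32];
};

/* Hypothetical helper: recover the save-area origin and length code. */
static struct mcesa *mcesa_from_lowcore(u64 mcesad, unsigned int *lc)
{
	*lc = mcesad & MCESA_LC_MASK;
	return (struct mcesa *)(uintptr_t)(mcesad & MCESA_ORIGIN_MASK);
}

int main(void)
{
	unsigned int lc;
	u64 mcesad = 0x2000UL | 0xaUL;	/* fake value: origin 0x2000, LC 10 */
	struct mcesa *m = mcesa_from_lowcore(mcesad, &lc);

	return (m != 0 && lc == 10) ? 0 : 1;
}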
@@ -135,6 +135,8 @@ struct thread_struct {
 	struct list_head list;
 	/* cpu runtime instrumentation */
 	struct runtime_instr_cb *ri_cb;
+	struct gs_cb *gs_cb;		/* Current guarded storage cb */
+	struct gs_cb *gs_bc_cb;		/* Broadcast guarded storage cb */
 	unsigned char trap_tdb[256];	/* Transaction abort diagnose block */
 	/*
 	 * Warning: 'fpu' is dynamically-sized. It *MUST* be at
@@ -215,6 +217,9 @@ void show_cacheinfo(struct seq_file *m);
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);

+/* Free guarded storage control block for current */
+void exit_thread_gs(void);
+
 /*
  * Return saved PC of a blocked thread.
  */

@ -31,6 +31,7 @@
|
||||
#define MACHINE_FLAG_VX _BITUL(13)
|
||||
#define MACHINE_FLAG_CAD _BITUL(14)
|
||||
#define MACHINE_FLAG_NX _BITUL(15)
|
||||
#define MACHINE_FLAG_GS _BITUL(16)
|
||||
|
||||
#define LPP_MAGIC _BITUL(31)
|
||||
#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
|
||||
@ -70,6 +71,7 @@ extern void detect_memory_memblock(void);
|
||||
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
|
||||
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
|
||||
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
|
||||
#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
|
||||
|
||||
/*
|
||||
* Console mode. Override with conmode=
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <linux/thread_info.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/guarded_storage.h>
|
||||
|
||||
extern struct task_struct *__switch_to(void *, void *);
|
||||
extern void update_cr_regs(struct task_struct *task);
|
||||
@ -33,12 +34,14 @@ static inline void restore_access_regs(unsigned int *acrs)
|
||||
save_fpu_regs(); \
|
||||
save_access_regs(&prev->thread.acrs[0]); \
|
||||
save_ri_cb(prev->thread.ri_cb); \
|
||||
save_gs_cb(prev->thread.gs_cb); \
|
||||
} \
|
||||
if (next->mm) { \
|
||||
update_cr_regs(next); \
|
||||
set_cpu_flag(CIF_FPU); \
|
||||
restore_access_regs(&next->thread.acrs[0]); \
|
||||
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
|
||||
restore_gs_cb(next->thread.gs_cb); \
|
||||
} \
|
||||
prev = __switch_to(prev,next); \
|
||||
} while (0)
|
||||
|
@ -54,11 +54,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
|
||||
#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */
|
||||
#define TIF_SIGPENDING 1 /* signal pending */
|
||||
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
|
||||
#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
|
||||
#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
|
||||
#define TIF_SECCOMP 5 /* secure computing */
|
||||
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
|
||||
#define TIF_UPROBE 7 /* breakpointed or single-stepping */
|
||||
#define TIF_UPROBE 3 /* breakpointed or single-stepping */
|
||||
#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
|
||||
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
|
||||
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
|
||||
#define TIF_SECCOMP 10 /* secure computing */
|
||||
#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
|
||||
#define TIF_31BIT 16 /* 32bit process */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
|
||||
@ -76,5 +77,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
|
||||
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
|
||||
#define _TIF_31BIT _BITUL(TIF_31BIT)
|
||||
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
|
||||
#define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
|
||||
|
||||
#endif /* _ASM_THREAD_INFO_H */
|
||||
|
@ -12,6 +12,7 @@ header-y += dasd.h
|
||||
header-y += debug.h
|
||||
header-y += errno.h
|
||||
header-y += fcntl.h
|
||||
header-y += guarded_storage.h
|
||||
header-y += hypfs.h
|
||||
header-y += ioctl.h
|
||||
header-y += ioctls.h
|
||||
|
77
arch/s390/include/uapi/asm/guarded_storage.h
Normal file
77
arch/s390/include/uapi/asm/guarded_storage.h
Normal file
@ -0,0 +1,77 @@
|
||||
#ifndef _GUARDED_STORAGE_H
|
||||
#define _GUARDED_STORAGE_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct gs_cb {
|
||||
__u64 reserved;
|
||||
__u64 gsd;
|
||||
__u64 gssm;
|
||||
__u64 gs_epl_a;
|
||||
};
|
||||
|
||||
struct gs_epl {
|
||||
__u8 pad1;
|
||||
union {
|
||||
__u8 gs_eam;
|
||||
struct {
|
||||
__u8 : 6;
|
||||
__u8 e : 1;
|
||||
__u8 b : 1;
|
||||
};
|
||||
};
|
||||
union {
|
||||
__u8 gs_eci;
|
||||
struct {
|
||||
__u8 tx : 1;
|
||||
__u8 cx : 1;
|
||||
__u8 : 5;
|
||||
__u8 in : 1;
|
||||
};
|
||||
};
|
||||
union {
|
||||
__u8 gs_eai;
|
||||
struct {
|
||||
__u8 : 1;
|
||||
__u8 t : 1;
|
||||
__u8 as : 2;
|
||||
__u8 ar : 4;
|
||||
};
|
||||
};
|
||||
__u32 pad2;
|
||||
__u64 gs_eha;
|
||||
__u64 gs_eia;
|
||||
__u64 gs_eoa;
|
||||
__u64 gs_eir;
|
||||
__u64 gs_era;
|
||||
};
|
||||
|
||||
#define GS_ENABLE 0
|
||||
#define GS_DISABLE 1
|
||||
#define GS_SET_BC_CB 2
|
||||
#define GS_CLEAR_BC_CB 3
|
||||
#define GS_BROADCAST 4
|
||||
|
||||
static inline void load_gs_cb(struct gs_cb *gs_cb)
|
||||
{
|
||||
asm volatile(".insn rxy,0xe3000000004d,0,%0" : : "Q" (*gs_cb));
|
||||
}
|
||||
|
||||
static inline void store_gs_cb(struct gs_cb *gs_cb)
|
||||
{
|
||||
asm volatile(".insn rxy,0xe30000000049,0,%0" : : "Q" (*gs_cb));
|
||||
}
|
||||
|
||||
static inline void save_gs_cb(struct gs_cb *gs_cb)
|
||||
{
|
||||
if (gs_cb)
|
||||
store_gs_cb(gs_cb);
|
||||
}
|
||||
|
||||
static inline void restore_gs_cb(struct gs_cb *gs_cb)
|
||||
{
|
||||
if (gs_cb)
|
||||
load_gs_cb(gs_cb);
|
||||
}
|
||||
|
||||
#endif /* _GUARDED_STORAGE_H */
|
@ -313,7 +313,7 @@
#define __NR_copy_file_range 375
#define __NR_preadv2 376
#define __NR_pwritev2 377
/* Number 378 is reserved for guarded storage */
#define __NR_s390_guarded_storage 378
#define __NR_statx 379
#define NR_syscalls 380

@ -57,7 +57,7 @@ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o
obj-y += entry.o reipl.o relocate_kernel.o

extra-y += head.o head64.o vmlinux.lds

@ -175,7 +175,7 @@ int main(void)
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
OFFSET(__LC_VX_SAVE_AREA_ADDR, lowcore, vector_save_area_addr);
OFFSET(__LC_MCESAD, lowcore, mcesad);
OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area);
OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area);

@ -178,4 +178,5 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);

@ -358,6 +358,8 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
__ctl_set_bit(0, 20);
}
if (test_facility(133))
S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
}

static inline void save_vector_registers(void)

@ -47,7 +47,7 @@ STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE)
_TIF_UPROBE | _TIF_GUARDED_STORAGE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
@ -332,6 +332,8 @@ ENTRY(system_call)
TSTMSK __TI_flags(%r12),_TIF_UPROBE
jo .Lsysc_uprobe_notify
#endif
TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
jo .Lsysc_guarded_storage
TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
jo .Lsysc_singlestep
TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
@ -408,6 +410,14 @@ ENTRY(system_call)
jg uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,.Lsysc_return
jg gs_load_bc_cb

#
# _PIF_PER_TRAP is set, call do_per_trap
#
@ -663,6 +673,8 @@ ENTRY(io_int_handler)
jo .Lio_sigpending
TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
jo .Lio_notify_resume
TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
jo .Lio_guarded_storage
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lio_vxrs
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
@ -696,6 +708,18 @@ ENTRY(io_int_handler)
larl %r14,.Lio_return
jg load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
# TRACE_IRQS_ON already done at .Lio_return
ssm __LC_SVC_NEW_PSW # reenable interrupts
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,gs_load_bc_cb
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#

@ -74,12 +74,14 @@ long sys_sigreturn(void);

long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
long sys_s390_guarded_storage(int command, struct gs_cb __user *);
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);

DECLARE_PER_CPU(u64, mt_cycles[8]);

void verify_facilities(void);
void gs_load_bc_cb(struct pt_regs *regs);
void set_fs_fixup(void);

#endif /* _ENTRY_H */
128 arch/s390/kernel/guarded_storage.c Normal file
@ -0,0 +1,128 @@
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/guarded_storage.h>
#include "entry.h"

void exit_thread_gs(void)
{
kfree(current->thread.gs_cb);
kfree(current->thread.gs_bc_cb);
current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
}

static int gs_enable(void)
{
struct gs_cb *gs_cb;

if (!current->thread.gs_cb) {
gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
if (!gs_cb)
return -ENOMEM;
gs_cb->gsd = 25;
preempt_disable();
__ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
preempt_enable();
}
return 0;
}

static int gs_disable(void)
{
if (current->thread.gs_cb) {
preempt_disable();
kfree(current->thread.gs_cb);
current->thread.gs_cb = NULL;
__ctl_clear_bit(2, 4);
preempt_enable();
}
return 0;
}

static int gs_set_bc_cb(struct gs_cb __user *u_gs_cb)
{
struct gs_cb *gs_cb;

gs_cb = current->thread.gs_bc_cb;
if (!gs_cb) {
gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
if (!gs_cb)
return -ENOMEM;
current->thread.gs_bc_cb = gs_cb;
}
if (copy_from_user(gs_cb, u_gs_cb, sizeof(*gs_cb)))
return -EFAULT;
return 0;
}

static int gs_clear_bc_cb(void)
{
struct gs_cb *gs_cb;

gs_cb = current->thread.gs_bc_cb;
current->thread.gs_bc_cb = NULL;
kfree(gs_cb);
return 0;
}

void gs_load_bc_cb(struct pt_regs *regs)
{
struct gs_cb *gs_cb;

preempt_disable();
clear_thread_flag(TIF_GUARDED_STORAGE);
gs_cb = current->thread.gs_bc_cb;
if (gs_cb) {
kfree(current->thread.gs_cb);
current->thread.gs_bc_cb = NULL;
__ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
}
preempt_enable();
}

static int gs_broadcast(void)
{
struct task_struct *sibling;

read_lock(&tasklist_lock);
for_each_thread(current, sibling) {
if (!sibling->thread.gs_bc_cb)
continue;
if (test_and_set_tsk_thread_flag(sibling, TIF_GUARDED_STORAGE))
kick_process(sibling);
}
read_unlock(&tasklist_lock);
return 0;
}

SYSCALL_DEFINE2(s390_guarded_storage, int, command,
struct gs_cb __user *, gs_cb)
{
if (!MACHINE_HAS_GS)
return -EOPNOTSUPP;
switch (command) {
case GS_ENABLE:
return gs_enable();
case GS_DISABLE:
return gs_disable();
case GS_SET_BC_CB:
return gs_set_bc_cb(gs_cb);
case GS_CLEAR_BC_CB:
return gs_clear_bc_cb();
case GS_BROADCAST:
return gs_broadcast();
default:
return -EINVAL;
}
}
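To make the new interface concrete, here is a hedged userspace sketch of driving the syscall defined above. It is illustrative only, not part of the commit; it assumes an s390x kernel with this series applied and falls back to the syscall number from the unistd.h hunk if the installed headers are older:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_s390_guarded_storage
#define __NR_s390_guarded_storage 378	/* from the unistd.h hunk above */
#endif

#define GS_ENABLE  0	/* commands from asm/guarded_storage.h */
#define GS_DISABLE 1

int main(void)
{
	/* GS_ENABLE makes the kernel allocate and load a control block */
	if (syscall(__NR_s390_guarded_storage, GS_ENABLE, NULL)) {
		perror("GS_ENABLE");
		return 1;
	}
	puts("guarded storage enabled for this thread");
	/* GS_DISABLE frees the control block again */
	return syscall(__NR_s390_guarded_storage, GS_DISABLE, NULL) ? 1 : 0;
}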
@ -27,6 +27,7 @@
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

@ -102,6 +103,8 @@ static void __do_machine_kdump(void *image)
*/
static noinline void __machine_kdump(void *image)
{
struct mcesa *mcesa;
unsigned long cr2_old, cr2_new;
int this_cpu, cpu;

lgr_info_log();
@ -114,8 +117,16 @@ static noinline void __machine_kdump(void *image)
continue;
}
/* Store status of the boot CPU */
mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_VX)
save_vx_regs((void *) &S390_lowcore.vector_save_area);
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
__ctl_store(cr2_old, 2, 2);
cr2_new = cr2_old | (1UL << 4);
__ctl_load(cr2_new, 2, 2);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
__ctl_load(cr2_old, 2, 2);
}
/*
* To create a good backchain for this CPU in the dump store_status
* is passed the address of a function. The address is saved into

@ -106,6 +106,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
int kill_task;
u64 zero;
void *fpt_save_area;
struct mcesa *mcesa;

kill_task = 0;
zero = 0;
@ -165,6 +166,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
: : "Q" (S390_lowcore.fpt_creg_save_area));
}

mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
if (!MACHINE_HAS_VX) {
/* Validate floating point registers */
asm volatile(
@ -209,8 +211,8 @@ static int notrace s390_validate_registers(union mci mci, int umode)
" la 1,%0\n"
" .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
: : "Q" (*(struct vx_array *)
&S390_lowcore.vector_save_area) : "1");
: : "Q" (*(struct vx_array *) mcesa->vector_save_area)
: "1");
__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
/* Validate access registers */
@ -224,6 +226,19 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
kill_task = 1;
}
/* Validate guarded storage registers */
if (MACHINE_HAS_GS && (S390_lowcore.cregs_save_area[2] & (1UL << 4))) {
if (!mci.gs)
/*
* Guarded storage register can't be restored and
* the current processes uses guarded storage.
* It has to be terminated.
*/
kill_task = 1;
else
load_gs_cb((struct gs_cb *)
mcesa->guarded_storage_save_area);
}
/*
* We don't even try to validate the TOD register, since we simply
* can't write something sensible into that register.

@ -73,8 +73,10 @@ extern void kernel_thread_starter(void);
*/
void exit_thread(struct task_struct *tsk)
{
if (tsk == current)
if (tsk == current) {
exit_thread_runtime_instr();
exit_thread_gs();
}
}

void flush_thread(void)
@ -159,6 +161,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
/* Don't copy runtime instrumentation info */
p->thread.ri_cb = NULL;
frame->childregs.psw.mask &= ~PSW_MASK_RI;
/* Don't copy guarded storage control block */
p->thread.gs_cb = NULL;
p->thread.gs_bc_cb = NULL;

/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {

@ -95,7 +95,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe"
"edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs"
};
static const char * const int_hwcap_str[] = {
"sie"

@ -44,30 +44,42 @@ void update_cr_regs(struct task_struct *task)
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
unsigned long cr0_old, cr0_new;
unsigned long cr2_old, cr2_new;
int cr0_changed, cr2_changed;

__ctl_store(cr0_old, 0, 0);
__ctl_store(cr2_old, 2, 2);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
unsigned long cr, cr_new;

__ctl_store(cr, 0, 0);
/* Set or clear transaction execution TXC bit 8. */
cr_new = cr | (1UL << 55);
cr0_new |= (1UL << 55);
if (task->thread.per_flags & PER_FLAG_NO_TE)
cr_new &= ~(1UL << 55);
if (cr_new != cr)
__ctl_load(cr_new, 0, 0);
cr0_new &= ~(1UL << 55);
/* Set or clear transaction execution TDC bits 62 and 63. */
__ctl_store(cr, 2, 2);
cr_new = cr & ~3UL;
cr2_new &= ~3UL;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
cr_new |= 1UL;
cr2_new |= 1UL;
else
cr_new |= 2UL;
cr2_new |= 2UL;
}
if (cr_new != cr)
__ctl_load(cr_new, 2, 2);
}
/* Take care of enable/disable of guarded storage. */
if (MACHINE_HAS_GS) {
cr2_new &= ~(1UL << 4);
if (task->thread.gs_cb)
cr2_new |= (1UL << 4);
}
/* Load control register 0/2 iff changed */
cr0_changed = cr0_new != cr0_old;
cr2_changed = cr2_new != cr2_old;
if (cr0_changed)
__ctl_load(cr0_new, 0, 0);
if (cr2_changed)
__ctl_load(cr2_new, 2, 2);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@ -1137,6 +1149,36 @@ static int s390_system_call_set(struct task_struct *target,
data, 0, sizeof(unsigned int));
}

static int s390_gs_cb_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct gs_cb *data = target->thread.gs_cb;

if (!MACHINE_HAS_GS)
return -ENODEV;
if (!data)
return -ENODATA;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(struct gs_cb));
}

static int s390_gs_cb_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct gs_cb *data = target->thread.gs_cb;

if (!MACHINE_HAS_GS)
return -ENODEV;
if (!data)
return -ENODATA;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(struct gs_cb));
}

static const struct user_regset s390_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
@ -1194,6 +1236,14 @@ static const struct user_regset s390_regsets[] = {
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
{
.core_note_type = NT_S390_GS_CB,
.n = sizeof(struct gs_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.get = s390_gs_cb_get,
.set = s390_gs_cb_set,
},
};

static const struct user_regset_view user_s390_view = {
@ -1422,6 +1472,14 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_compat_regs_high_get,
.set = s390_compat_regs_high_set,
},
{
.core_note_type = NT_S390_GS_CB,
.n = sizeof(struct gs_cb) / sizeof(__u64),
.size = sizeof(__u64),
.align = sizeof(__u64),
.get = s390_gs_cb_get,
.set = s390_gs_cb_set,
},
};

static const struct user_regset_view user_s390_compat_view = {
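A hedged sketch of how a tracer might read the new NT_S390_GS_CB regset added above via PTRACE_GETREGSET; the note value is an assumption based on the matching elf.h definition and is defined defensively, and error handling is omitted for brevity:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>

#ifndef NT_S390_GS_CB
#define NT_S390_GS_CB 0x30b	/* assumed value, see elf.h */
#endif

/* Read the 4 * 8 byte guarded storage control block of a stopped tracee. */
static long read_gs_cb(pid_t pid, unsigned long long cb[4])
{
	struct iovec iov = {
		.iov_base = cb,
		.iov_len = 4 * sizeof(unsigned long long),
	};

	return ptrace(PTRACE_GETREGSET, pid, NT_S390_GS_CB, &iov);
}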
@ -339,9 +339,15 @@ static void __init setup_lowcore(void)
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
unsigned long bits, size;

bits = MACHINE_HAS_GS ? 11 : 10;
size = 1UL << bits;
lc->mcesad = (__u64) memblock_virt_alloc(size, size);
if (MACHINE_HAS_GS)
lc->mcesad |= bits;
}
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
@ -779,6 +785,12 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_S390_VXRS_BCD;
}

/*
* Guarded storage support HWCAP_S390_GS is bit 12.
*/
if (MACHINE_HAS_GS)
elf_hwcap |= HWCAP_S390_GS;

get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {

@ -51,6 +51,7 @@
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include "entry.h"

enum {
@ -78,6 +79,8 @@ struct pcpu {
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

static struct kmem_cache *pcpu_mcesa_cache;

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

@ -188,8 +191,10 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, panic_stack;
unsigned long mcesa_origin, mcesa_bits;
struct lowcore *lc;

mcesa_origin = mcesa_bits = 0;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@ -197,20 +202,27 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
panic_stack = __get_free_page(GFP_KERNEL);
if (!pcpu->lowcore || !panic_stack || !async_stack)
goto out;
if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
mcesa_origin = (unsigned long)
kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
if (!mcesa_origin)
goto out;
mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
}
} else {
async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
}
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->mcesad = mcesa_origin | mcesa_bits;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
if (vdso_alloc_per_cpu(lc))
goto out;
lowcore_ptr[cpu] = lc;
@ -218,6 +230,9 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
return 0;
out:
if (pcpu != &pcpu_devices[0]) {
if (mcesa_origin)
kmem_cache_free(pcpu_mcesa_cache,
(void *) mcesa_origin);
free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@ -229,11 +244,17 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
unsigned long mcesa_origin;

pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
vdso_free_per_cpu(pcpu->lowcore);
if (pcpu == &pcpu_devices[0])
return;
if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
}
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@ -550,9 +571,11 @@ int smp_store_status(int cpu)
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
if (!MACHINE_HAS_VX)
if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
return 0;
pa = __pa(pcpu->lowcore->vector_save_area_addr);
pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_GS)
pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@ -897,12 +920,22 @@ void __init smp_fill_possible_mask(void)

void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned long size;

/* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
/* create slab cache for the machine-check-extended-save-areas */
if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
size, size, 0, NULL);
if (!pcpu_mcesa_cache)
panic("Couldn't create nmi save area cache");
}
}

void __init smp_prepare_boot_cpu(void)

@ -386,5 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2)
SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
NI_SYSCALL
SYSCALL(sys_s390_guarded_storage,compat_sys_s390_guarded_storage) /* 378 */
SYSCALL(sys_statx,compat_sys_statx)

@ -420,8 +420,8 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
save_access_regs(vcpu->run->s.regs.acrs);

/* Extended save area */
rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
sizeof(unsigned long));
rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
sizeof(unsigned long));
/* Only bits 0-53 are used for address formation */
ext_sa_addr &= ~0x3ffUL;
if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)

static void refresh_pce(void *ignored)
{
if (current->mm)
load_mm_cr4(current->mm);
if (current->active_mm)
load_mm_cr4(current->active_mm);
}

static void x86_pmu_event_mapped(struct perf_event *event)
@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;

/*
* This function relies on not being called concurrently in two
* tasks in the same mm. Otherwise one task could observe
* perf_rdpmc_allowed > 1 and return all the way back to
* userspace with CR4.PCE clear while another task is still
* doing on_each_cpu_mask() to propagate CR4.PCE.
*
* For now, this can't happen because all callers hold mmap_sem
* for write. If this changes, we'll need a different solution.
*/
lockdep_assert_held_exclusive(&current->mm->mmap_sem);

if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
}

@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
*(tmp + 1) = 0;
}

#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
defined(CONFIG_PARAVIRT))
static inline void native_pud_clear(pud_t *pudp)
{
}
#endif

static inline void pud_clear(pud_t *pudp)
{

@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif

@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
return -EINVAL;
}

if (!enabled) {
++disabled_cpus;
return -EINVAL;
}

if (boot_cpu_physical_apicid != -1U)
ver = boot_cpu_apic_version;

cpu = __generic_processor_info(id, ver, enabled);
cpu = generic_processor_info(id, ver);
if (cpu >= 0)
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;

@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>

int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;

@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
return nr_logical_cpuids++;
}

int __generic_processor_info(int apicid, int version, bool enabled)
int generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;

if (enabled) {
pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
"reached. Processor %d/0x%x ignored.\n",
max, thiscpu, apicid);
}
pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
"reached. Processor %d/0x%x ignored.\n",
max, thiscpu, apicid);

disabled_cpus++;
return -EINVAL;
@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);

if (enabled) {
num_processors++;
physid_set(apicid, phys_cpu_present_map);
set_cpu_present(cpu, true);
} else {
disabled_cpus++;
}
physid_set(apicid, phys_cpu_present_map);
set_cpu_present(cpu, true);
num_processors++;

return cpu;
}

int generic_processor_info(int apicid, int version)
{
return __generic_processor_info(apicid, version, true);
}

int hard_smp_processor_id(void)
{
return read_apic_id();

@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
kernfs_unbreak_active_protection(kn);
kernfs_put(kn);
kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);

@ -4,6 +4,7 @@
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>

@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
spin_lock_irqsave(&desc->lock, flags);

/*
* most handlers of type NMI_UNKNOWN never return because
* they just assume the NMI is theirs. Just a sanity check
* to manage expectations
* Indicate if there are multiple registrations on the
* internal NMI handler call chains (SERR and IO_CHECK).
*/
WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
* the refined calibration and directly register it as a clocksource.
*/
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
return 0;
}
@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
return sizeof(*regs);
}

#ifdef CONFIG_X86_32
#define GCC_REALIGN_WORDS 3
#else
#define GCC_REALIGN_WORDS 1
#endif

static bool is_last_task_frame(struct unwind_state *state)
{
unsigned long bp = (unsigned long)state->bp;
unsigned long regs = (unsigned long)task_pt_regs(state->task);
unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;

/*
* We have to check for the last task frame at two different locations
* because gcc can occasionally decide to realign the stack pointer and
* change the offset of the stack frame by a word in the prologue of a
* function called by head/entry code.
* change the offset of the stack frame in the prologue of a function
* called by head/entry code. Examples:
*
* <start_secondary>:
* push %edi
* lea 0x8(%esp),%edi
* and $0xfffffff8,%esp
* pushl -0x4(%edi)
* push %ebp
* mov %esp,%ebp
*
* <x86_64_start_kernel>:
* lea 0x8(%rsp),%r10
* and $0xfffffffffffffff0,%rsp
* pushq -0x8(%r10)
* push %rbp
* mov %rsp,%rbp
*
* Note that after aligning the stack, it pushes a duplicate copy of
* the return address before pushing the frame pointer.
*/
return bp == regs - FRAME_HEADER_SIZE ||
bp == regs - FRAME_HEADER_SIZE - sizeof(long);
return (state->bp == last_bp ||
(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
}

/*

@ -1,3 +1,4 @@
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>

@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
* we might run off the end of the bounds table if we are on
* a 64-bit kernel and try to get 8 bytes.
*/
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
long __user *bd_entry_ptr)
{
u32 bd_entry_32;

@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
@ -0,0 +1,82 @@
/*
* Intel Merrifield power button support
*
* (C) Copyright 2017 Intel Corporation
*
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/sfi.h>

#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>

static struct resource mrfld_power_btn_resources[] = {
{
.flags = IORESOURCE_IRQ,
},
};

static struct platform_device mrfld_power_btn_dev = {
.name = "msic_power_btn",
.id = PLATFORM_DEVID_NONE,
.num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
.resource = mrfld_power_btn_resources,
};

static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
unsigned long code, void *data)
{
if (code == SCU_DOWN) {
platform_device_unregister(&mrfld_power_btn_dev);
return 0;
}

return platform_device_register(&mrfld_power_btn_dev);
}

static struct notifier_block mrfld_power_btn_scu_notifier = {
.notifier_call = mrfld_power_btn_scu_status_change,
};

static int __init register_mrfld_power_btn(void)
{
if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
return -ENODEV;

/*
* We need to be sure that the SCU IPC is ready before
* PMIC power button device can be registered:
*/
intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);

return 0;
}
arch_initcall(register_mrfld_power_btn);

static void __init *mrfld_power_btn_platform_data(void *info)
{
struct resource *res = mrfld_power_btn_resources;
struct sfi_device_table_entry *pentry = info;

res->start = res->end = pentry->irq;
return NULL;
}

static const struct devs_id mrfld_power_btn_dev_id __initconst = {
.name = "bcove_power_btn",
.type = SFI_DEV_TYPE_IPC,
.delay = 1,
.msic = 1,
.get_platform_data = &mrfld_power_btn_platform_data,
};

sfi_device(mrfld_power_btn_dev_id);

@ -19,7 +19,7 @@
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>

#define TANGIER_EXT_TIMER0_MSI 15
#define TANGIER_EXT_TIMER0_MSI 12

static struct platform_device wdt_dev = {
.name = "intel_mid_wdt",

@ -17,16 +17,6 @@

#include "intel_mid_weak_decls.h"

static void penwell_arch_setup(void);
/* penwell arch ops */
static struct intel_mid_ops penwell_ops = {
.arch_setup = penwell_arch_setup,
};

static void mfld_power_off(void)
{
}

static unsigned long __init mfld_calibrate_tsc(void)
{
unsigned long fast_calibrate;
@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
static void __init penwell_arch_setup(void)
{
x86_platform.calibrate_tsc = mfld_calibrate_tsc;
pm_power_off = mfld_power_off;
}

static struct intel_mid_ops penwell_ops = {
.arch_setup = penwell_arch_setup,
};

void *get_penwell_ops(void)
{
return &penwell_ops;
12 block/bio.c
@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
bio_list_init(&punt);
bio_list_init(&nopunt);

while ((bio = bio_list_pop(current->bio_list)))
while ((bio = bio_list_pop(&current->bio_list[0])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
current->bio_list[0] = nopunt;

*current->bio_list = nopunt;
bio_list_init(&nopunt);
while ((bio = bio_list_pop(&current->bio_list[1])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
current->bio_list[1] = nopunt;

spin_lock(&bs->rescue_lock);
bio_list_merge(&bs->rescue_list, &punt);
@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
* we retry with the original gfp_flags.
*/

if (current->bio_list && !bio_list_empty(current->bio_list))
if (current->bio_list &&
(!bio_list_empty(&current->bio_list[0]) ||
!bio_list_empty(&current->bio_list[1])))
gfp_mask &= ~__GFP_DIRECT_RECLAIM;

p = mempool_alloc(bs->bio_pool, gfp_mask);

@ -1973,7 +1973,14 @@ generic_make_request_checks(struct bio *bio)
*/
blk_qc_t generic_make_request(struct bio *bio)
{
struct bio_list bio_list_on_stack;
/*
* bio_list_on_stack[0] contains bios submitted by the current
* make_request_fn.
* bio_list_on_stack[1] contains bios that were submitted before
* the current make_request_fn, but that haven't been processed
* yet.
*/
struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;

if (!generic_make_request_checks(bio))
@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
* should be added at the tail
*/
if (current->bio_list) {
bio_list_add(current->bio_list, bio);
bio_list_add(&current->bio_list[0], bio);
goto out;
}

@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack);
current->bio_list = &bio_list_on_stack;
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);

if (likely(blk_queue_enter(q, false) == 0)) {
struct bio_list hold;
struct bio_list lower, same;

/* Create a fresh bio_list for all subordinate requests */
hold = bio_list_on_stack;
bio_list_init(&bio_list_on_stack);
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);

blk_queue_exit(q);
@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
*/
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
if (q == bdev_get_queue(bio->bi_bdev))
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
/* now assemble so we handle the lowest level first */
bio_list_merge(&bio_list_on_stack, &lower);
bio_list_merge(&bio_list_on_stack, &same);
bio_list_merge(&bio_list_on_stack, &hold);
bio_list_merge(&bio_list_on_stack[0], &lower);
bio_list_merge(&bio_list_on_stack[0], &same);
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
bio_io_error(bio);
}
bio = bio_list_pop(current->bio_list);
bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */

@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
for (i = 0; i < set->nr_hw_queues; i++) {
struct blk_mq_tags *tags = set->tags[i];

if (!tags)
continue;

for (j = 0; j < tags->nr_tags; j++) {
if (!tags->static_rqs[j])
continue;

@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
bool may_sleep)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
}

insert:
blk_mq_sched_insert_request(rq, false, true, true, false);
blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
}

/*
@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
rcu_read_lock();
blk_mq_try_issue_directly(old_rq, &cookie);
blk_mq_try_issue_directly(old_rq, &cookie, false);
rcu_read_unlock();
} else {
srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
blk_mq_try_issue_directly(old_rq, &cookie);
blk_mq_try_issue_directly(old_rq, &cookie, true);
srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
}
goto done;
@ -266,7 +266,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
return err;
}

int af_alg_accept(struct sock *sk, struct socket *newsock)
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
if (!type)
goto unlock;

sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
err = -ENOMEM;
if (!sk2)
goto unlock;
@ -323,9 +323,10 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
}
EXPORT_SYMBOL_GPL(af_alg_accept);

static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
return af_alg_accept(sock->sk, newsock);
return af_alg_accept(sock->sk, newsock, kern);
}

static const struct proto_ops alg_proto_ops = {

@ -239,7 +239,8 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
return err ?: len;
}

static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
if (err)
return err;

err = af_alg_accept(ask->parent, newsock);
err = af_alg_accept(ask->parent, newsock, kern);
if (err)
return err;

@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
}

static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
int flags)
int flags, bool kern)
{
int err;

@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
if (err)
return err;

return hash_accept(sock, newsock, flags);
return hash_accept(sock, newsock, flags, kern);
}

static struct proto_ops algif_hash_ops_nokey = {
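For reference, the accept() path patched above is the one userspace exercises when hashing through AF_ALG. A minimal sketch (Linux only, assumes the kernel's user-space crypto API for hashes is enabled; error handling trimmed for brevity):

#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha256",
	};
	unsigned char digest[32];
	int tfm, op, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, 0);	/* ends up in hash_accept() above */
	write(op, "abc", 3);
	read(op, digest, sizeof(digest));
	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	close(op);
	close(tfm);
	return 0;
}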
@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
|
||||
|
||||
void __weak arch_unregister_cpu(int cpu) {}
|
||||
|
||||
int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
|
||||
{
|
||||
unsigned long long sta;
|
||||
@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
|
||||
pr->acpi_id = value;
|
||||
}
|
||||
|
||||
if (acpi_duplicate_processor_id(pr->acpi_id)) {
|
||||
dev_err(&device->dev,
|
||||
"Failed to get unique processor _UID (0x%x)\n",
|
||||
pr->acpi_id);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
|
||||
pr->acpi_id);
|
||||
if (invalid_phys_cpuid(pr->phys_id))
|
||||
@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
|
||||
static int nr_unique_ids __initdata;
|
||||
|
||||
/* The number of the duplicate processor IDs */
|
||||
static int nr_duplicate_ids __initdata;
|
||||
static int nr_duplicate_ids;
|
||||
|
||||
/* Used to store the unique processor IDs */
|
||||
static int unique_processor_ids[] __initdata = {
|
||||
@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
|
||||
};
|
||||
|
||||
/* Used to store the duplicate processor IDs */
|
||||
static int duplicate_processor_ids[] __initdata = {
|
||||
static int duplicate_processor_ids[] = {
|
||||
[0 ... NR_CPUS - 1] = -1,
|
||||
};
|
||||
|
||||
@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
|
||||
void **rv)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_object_type acpi_type;
|
||||
unsigned long long uid;
|
||||
union acpi_object object = { 0 };
|
||||
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
|
||||
|
||||
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
|
||||
status = acpi_get_type(handle, &acpi_type);
|
||||
if (ACPI_FAILURE(status))
|
||||
acpi_handle_info(handle, "Not get the processor object\n");
|
||||
else
|
||||
processor_validated_ids_update(object.processor.proc_id);
|
||||
return false;
|
||||
|
||||
switch (acpi_type) {
|
||||
case ACPI_TYPE_PROCESSOR:
|
||||
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto err;
|
||||
uid = object.processor.proc_id;
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_DEVICE:
|
||||
status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto err;
|
||||
break;
|
||||
default:
|
||||
goto err;
|
||||
}
|
||||
|
||||
processor_validated_ids_update(uid);
|
||||
return true;
|
||||
|
||||
err:
|
||||
acpi_handle_info(handle, "Invalid processor object\n");
|
||||
return false;
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static void __init acpi_processor_check_duplicates(void)
|
||||
void __init acpi_processor_check_duplicates(void)
|
||||
{
|
||||
/* Search all processor nodes in ACPI namespace */
|
||||
/* check the correctness for all processors in ACPI namespace */
|
||||
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
|
||||
ACPI_UINT32_MAX,
|
||||
acpi_processor_ids_walk,
|
||||
NULL, NULL, NULL);
|
||||
acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
bool __init acpi_processor_validate_proc_id(int proc_id)
|
||||
bool acpi_duplicate_processor_id(int proc_id)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
|
||||
acpi_wakeup_device_init();
|
||||
acpi_debugger_init();
|
||||
acpi_setup_sb_notify_handler();
|
||||
acpi_set_processor_mapping();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
 }

 static int map_lapic_id(struct acpi_subtable_header *entry,
-		u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+		u32 acpi_id, phys_cpuid_t *apic_id)
 {
	struct acpi_madt_local_apic *lapic =
		container_of(entry, struct acpi_madt_local_apic, header);

-	if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
 }

 static int map_x2apic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
	struct acpi_madt_local_x2apic *apic =
		container_of(entry, struct acpi_madt_local_x2apic, header);

-	if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
 }

 static int map_lsapic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
	struct acpi_madt_local_sapic *lsapic =
		container_of(entry, struct acpi_madt_local_sapic, header);

-	if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
  * Retrieve the ARM CPU physical identifier (MPIDR)
  */
 static int map_gicc_mpidr(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
 {
	struct acpi_madt_generic_interrupt *gicc =
		container_of(entry, struct acpi_madt_generic_interrupt, header);

-	if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	/* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
 }

 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
-				   int type, u32 acpi_id, bool ignore_disabled)
+				   int type, u32 acpi_id)
 {
	unsigned long madt_end, entry;
	phys_cpuid_t phys_id = PHYS_CPUID_INVALID;	/* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-			if (!map_lapic_id(header, acpi_id, &phys_id,
-					  ignore_disabled))
+			if (!map_lapic_id(header, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-			if (!map_x2apic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_x2apic_id(header, type, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-			if (!map_lsapic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_lsapic_id(header, type, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
-					    ignore_disabled))
+			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
				break;
		}
		entry += header->length;
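
The map_madt_entry() loop above is the classic ACPI subtable walk: entries of different types and lengths are packed back to back, each starting with a common header, and the cursor advances by each entry's own length field. A minimal user-space sketch of the same pattern (types, names and data here are illustrative, not from the kernel sources):

/*
 * Sketch of a MADT-style walk: variable-length records, each led by a
 * common {type, length} header, dispatched by type.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct subtable_header {
	uint8_t type;
	uint8_t length;
};

enum { TYPE_A = 0, TYPE_B = 1 };

static void walk_subtables(const uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + sizeof(struct subtable_header) <= size) {
		struct subtable_header hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.length < sizeof(hdr) || off + hdr.length > size)
			break;	/* malformed entry: stop walking */

		switch (hdr.type) {
		case TYPE_A:
			printf("entry A at offset %zu\n", off);
			break;
		case TYPE_B:
			printf("entry B at offset %zu\n", off);
			break;
		default:
			break;	/* unknown types are skipped, not fatal */
		}
		off += hdr.length;	/* advance by the entry's own length */
	}
}

int main(void)
{
	uint8_t buf[] = { TYPE_A, 2, TYPE_B, 4, 0xde, 0xad, TYPE_A, 2 };

	walk_subtables(buf, sizeof(buf));
	return 0;
}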
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
	if (!madt)
		return PHYS_CPUID_INVALID;

-	rv = map_madt_entry(madt, 1, acpi_id, true);
+	rv = map_madt_entry(madt, 1, acpi_id);

	acpi_put_table((struct acpi_table_header *)madt);

	return rv;
 }

-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
-				  bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 {
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-		map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+		map_lapic_id(header, acpi_id, &phys_id);
	else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-		map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_lsapic_id(header, type, acpi_id, &phys_id);
	else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-		map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_x2apic_id(header, type, acpi_id, &phys_id);
	else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
-		map_gicc_mpidr(header, type, acpi_id, &phys_id,
-			       ignore_disabled);
+		map_gicc_mpidr(header, type, acpi_id, &phys_id);

 exit:
	kfree(buffer.pointer);
	return phys_id;
 }

-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
-				       u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
	phys_cpuid_t phys_id;

-	phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+	phys_id = map_mat_entry(handle, type, acpi_id);
	if (invalid_phys_cpuid(phys_id))
-		phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
-					   ignore_disabled);
+		phys_id = map_madt_entry(get_madt_table(), type, acpi_id);

	return phys_id;
 }

-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
-	return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);

-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-	int type, id;
-	u32 acpi_id;
-	acpi_status status;
-	acpi_object_type acpi_type;
-	unsigned long long tmp;
-	union acpi_object object = { 0 };
-	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-	status = acpi_get_type(handle, &acpi_type);
-	if (ACPI_FAILURE(status))
-		return false;
-
-	switch (acpi_type) {
-	case ACPI_TYPE_PROCESSOR:
-		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = object.processor.proc_id;
-
-		/* validate the acpi_id */
-		if(acpi_processor_validate_proc_id(acpi_id))
-			return false;
-		break;
-	case ACPI_TYPE_DEVICE:
-		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = tmp;
-		break;
-	default:
-		return false;
-	}
-
-	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
-	id = acpi_map_cpuid(*phys_id, acpi_id);
-
-	if (id < 0)
-		return false;
-	*cpuid = id;
-	return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
-			   void **rv)
-{
-	phys_cpuid_t phys_id;
-	int cpu_id;
-
-	if (!map_processor(handle, &phys_id, &cpu_id))
-		return AE_ERROR;
-
-	acpi_map_cpu2node(handle, cpu_id, phys_id);
-	return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
-	/* Set persistent cpu <-> node mapping for all processors. */
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-			    ACPI_UINT32_MAX, set_processor_node_mapping,
-			    NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
			 u64 *phys_addr, int *ioapic_id)
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
	case AHCI_LS1043A:
		if (!qpriv->ecc_addr)
			return -EINVAL;
-		writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+		       qpriv->ecc_addr);
		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
		if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
	case AHCI_LS1046A:
		if (!qpriv->ecc_addr)
			return -EINVAL;
-		writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+		       qpriv->ecc_addr);
		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
		if (qpriv->is_dmacoherent)
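
Both hunks replace a blind register write with a read-modify-write, so that disabling ECC for channel 2 no longer clobbers whatever else is configured in the same register. A stand-alone sketch of the difference, with an ordinary variable standing in for the MMIO register and an assumed bit position:

/*
 * Sketch: blind write vs. read-modify-write on a shared control register.
 * ECC_DIS_CH2 and the initial register value are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define ECC_DIS_CH2 (1u << 2)	/* illustrative bit position */

static volatile uint32_t reg = 0x81;	/* other bits already configured */

int main(void)
{
	/* before: reg = ECC_DIS_CH2; would lose bits 0 and 7 */

	/* after: read-modify-write preserves the rest of the register */
	reg = reg | ECC_DIS_CH2;

	printf("reg = %#x\n", (unsigned int)reg);	/* 0x85 */
	return 0;
}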
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
		break;

	default:
-		WARN_ON_ONCE(1);
		return AC_ERR_SYSTEM;
	}

@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,

 static void ata_tport_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
	device_initialize(dev);
	dev->type = &ata_port_type;

-	dev->parent = get_device(parent);
+	dev->parent = parent;
	dev->release = ata_tport_release;
	dev_set_name(dev, "ata%d", ap->print_id);
	transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,

 static void ata_tlink_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
	int error;

	device_initialize(dev);
-	dev->parent = get_device(&ap->tdev);
+	dev->parent = &ap->tdev;
	dev->release = ata_tlink_release;
	if (ata_is_host_link(link))
		dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,

 static void ata_tdev_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
	int error;

	device_initialize(dev);
-	dev->parent = get_device(&link->tdev);
+	dev->parent = &link->tdev;
	dev->release = ata_tdev_release;
	if (ata_is_host_link(link))
		dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
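
All four hunk pairs follow one rule: the get_device() taken when a transport node was added was balanced by the put_device() in its release callback, and both sides are removed together, so the reference count stays balanced. A plain-C sketch of that pairing discipline (the get/put helpers are illustrative stand-ins for get_device()/put_device()):

/*
 * Sketch: every reference taken with get() must be dropped by exactly
 * one put(); removing a get() is only safe if its matching put() goes
 * away too.
 */
#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
};

static struct object *get_object(struct object *obj)
{
	obj->refcount++;
	return obj;
}

static void put_object(struct object *obj)
{
	if (--obj->refcount == 0) {
		printf("freeing object\n");
		free(obj);
	}
}

int main(void)
{
	struct object *parent = calloc(1, sizeof(*parent));

	parent->refcount = 1;	/* the caller's own reference */

	/* old scheme: child takes a reference at add time ... */
	get_object(parent);
	/* ... and its release callback must drop it */
	put_object(parent);

	/* new scheme: neither side touches the count; the parent's
	 * lifetime is guaranteed by the device hierarchy instead */

	put_object(parent);	/* caller's reference; object is freed */
	return 0;
}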
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
	return restart_syscall();
 }

-void assert_held_device_hotplug(void)
-{
-	lockdep_assert_held(&device_hotplug_lock);
-}
-
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
				irq, err);
			return err;
		}
-		omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);

-		priv->clk = of_clk_get(pdev->dev.of_node, 0);
+		priv->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
			dev_err(&pdev->dev, "unable to enable the clk, "
				"err = %d\n", err);
		}
+
+		/*
+		 * On OMAP4, enabling the shutdown_oflo interrupt is
+		 * done in the interrupt mask register. There is no
+		 * such register on EIP76, and it's enabled by the
+		 * same bit in the control register
+		 */
+		if (priv->pdata->regs[RNG_INTMASK_REG])
+			omap_rng_write(priv, RNG_INTMASK_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
+		else
+			omap_rng_write(priv, RNG_CONTROL_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
	}
	return 0;
 }
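
The new code keys the write off the per-variant register table: a register a variant lacks has no offset in its table, so its entry reads as zero and the control register is used instead. A sketch of that table-driven dispatch, with made-up offsets:

/*
 * Sketch: per-variant register maps where a zero entry means "this
 * variant has no such register". Names and offsets are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

enum { REG_CONTROL, REG_INTMASK, REG_MAX };

static const uint16_t omap4_regs[REG_MAX] = {
	[REG_CONTROL] = 0x20,
	[REG_INTMASK] = 0x44,
};

static const uint16_t eip76_regs[REG_MAX] = {
	[REG_CONTROL] = 0x10,
	/* no interrupt mask register: entry stays 0 */
};

static void enable_oflo_irq(const uint16_t *regs)
{
	if (regs[REG_INTMASK])
		printf("write mask bit to INTMASK at %#x\n",
		       (unsigned)regs[REG_INTMASK]);
	else
		printf("write mask bit to CONTROL at %#x\n",
		       (unsigned)regs[REG_CONTROL]);
}

int main(void)
{
	enable_oflo_irq(omap4_regs);
	enable_oflo_irq(eip76_regs);
	return 0;
}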
@@ -10,7 +10,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>


 /*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
	return (upper << 16) | lower;
 }

-static u32 tc_get_cv32(void)
-{
-	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
 static u64 tc_get_cycles32(struct clocksource *cs)
 {
-	return tc_get_cv32();
+	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }

 static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };

-static u64 notrace tc_read_sched_clock(void)
-{
-	return tc_get_cv32();
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS

 struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
		clksrc.read = tc_get_cycles32;
		/* setup ony channel 0 */
		tcb_setup_single_chan(tc, best_divisor_idx);
-
-		/* register sched_clock on chips with single 32 bit counter */
-		sched_clock_register(tc_read_sched_clock, 32, divided_rate);
	} else {
		/* tclib will give us three clocks no matter what the
		 * underlying platform supports.
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
 {
	unsigned int cur_freq = __cpufreq_get(policy);
-	if (!cur_freq)
-		return sprintf(buf, "<unknown>");
-	return sprintf(buf, "%u\n", cur_freq);
+
+	if (cur_freq)
+		return sprintf(buf, "%u\n", cur_freq);
+
+	return sprintf(buf, "<unknown>\n");
 }

 /**
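
The rewrite puts the common case first and, more importantly, adds the newline that the "<unknown>" case was missing. The resulting shape, as a stand-alone sketch:

/* Sketch of the fixed function shape; buffer handling is illustrative. */
#include <stdio.h>

static int show_cur_freq(char *buf, unsigned int cur_freq)
{
	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");	/* now newline-terminated */
}

int main(void)
{
	char buf[32];

	show_cur_freq(buf, 1800000);
	printf("%s", buf);
	show_cur_freq(buf, 0);
	printf("%s", buf);
	return 0;
}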
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
	return div64_u64(x << EXT_FRAC_BITS, y);
 }

+static inline int32_t percent_ext_fp(int percent)
+{
+	return div_ext_fp(percent, 100);
+}
+
 /**
  * struct sample -	Store performance sample
  * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
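
percent_ext_fp() converts a percentage into the driver's extended fixed-point format by dividing by 100 in that format. A worked stand-alone example of the arithmetic; the number of fraction bits chosen here is illustrative, not taken from the source:

/* Sketch of EXT_FRAC_BITS-style fixed point, assuming 16 fraction bits. */
#include <stdint.h>
#include <stdio.h>

#define EXT_FRAC_BITS 16

static int64_t div_ext_fp(int64_t x, int64_t y)
{
	return (x << EXT_FRAC_BITS) / y;	/* x/y as a Q.16 fraction */
}

static int64_t percent_ext_fp(int percent)
{
	return div_ext_fp(percent, 100);	/* e.g. 75% -> 0.75 in Q.16 */
}

static int fp_ext_toint(int64_t x)
{
	return (int)(x >> EXT_FRAC_BITS);	/* truncate back to integer */
}

int main(void)
{
	int64_t frac = percent_ext_fp(75);

	/* scale a hardware cap of 40 by 0.75: expect 30 */
	printf("scaled = %d\n", fp_ext_toint(40 * frac));
	return 0;
}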
@@ -845,12 +850,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {

 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
-	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	int min, hw_min, max, hw_max, cpu;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, policy->cpus) {
-		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

@@ -863,20 +867,15 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
			hw_max = HWP_GUARANTEED_PERF(cap);
		else
			hw_max = HWP_HIGHEST_PERF(cap);
-		range = hw_max - hw_min;

-		max_perf_pct = perf_limits->max_perf_pct;
-		min_perf_pct = perf_limits->min_perf_pct;
+		min = fp_ext_toint(hw_max * perf_limits->min_perf);

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		adj_range = min_perf_pct * range / 100;
-		min = hw_min + adj_range;
+
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

-		adj_range = max_perf_pct * range / 100;
-		max = hw_min + adj_range;
-
+		max = fp_ext_toint(hw_max * perf_limits->max_perf);
		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

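
The effect of the hunk above is a semantic change, not just a cleanup: the old code placed the limit at a percentage of the hw_min..hw_max range, while the new code scales hw_max alone by a fixed-point fraction. A numeric sketch with made-up values shows the two can differ:

/* Sketch: old range-based vs. new fraction-of-hw_max limit arithmetic. */
#include <stdio.h>

int main(void)
{
	int hw_min = 8, hw_max = 40, min_pct = 50;

	/* old: percent of the range, offset by hw_min */
	int old_min = hw_min + min_pct * (hw_max - hw_min) / 100;

	/* new: fraction of hw_max (0.5 here), no range arithmetic */
	int new_min = hw_max * min_pct / 100;

	printf("old=%d new=%d\n", old_min, new_min);	/* old=24 new=20 */
	return 0;
}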
@@ -989,6 +988,7 @@ static void intel_pstate_update_policies(void)
 static int pid_param_set(void *data, u64 val)
 {
	*(u32 *)data = val;
+	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	intel_pstate_reset_all_pid();
	return 0;
 }
@@ -1225,7 +1225,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
-	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+	limits->max_perf = percent_ext_fp(limits->max_perf_pct);

	intel_pstate_update_policies();

@@ -1262,7 +1262,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
-	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
+	limits->min_perf = percent_ext_fp(limits->min_perf_pct);

	intel_pstate_update_policies();

@@ -2080,36 +2080,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
 {
+	int32_t max_policy_perf, min_policy_perf;

-	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
-					      policy->cpuinfo.max_freq);
-	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+	max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+	max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
	if (policy->max == policy->min) {
-		limits->min_policy_pct = limits->max_policy_pct;
+		min_policy_perf = max_policy_perf;
	} else {
-		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
-						      policy->cpuinfo.max_freq);
-		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
-						 0, 100);
+		min_policy_perf = div_ext_fp(policy->min,
					     policy->cpuinfo.max_freq);
+		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

-	/* Normalize user input to [min_policy_pct, max_policy_pct] */
-	limits->min_perf_pct = max(limits->min_policy_pct,
-				   limits->min_sysfs_pct);
-	limits->min_perf_pct = min(limits->max_policy_pct,
-				   limits->min_perf_pct);
-	limits->max_perf_pct = min(limits->max_policy_pct,
-				   limits->max_sysfs_pct);
-	limits->max_perf_pct = max(limits->min_policy_pct,
-				   limits->max_perf_pct);
+	/* Normalize user input to [min_perf, max_perf] */
+	limits->min_perf = max(min_policy_perf,
+			       percent_ext_fp(limits->min_sysfs_pct));
+	limits->min_perf = min(limits->min_perf, max_policy_perf);
+	limits->max_perf = min(max_policy_perf,
+			       percent_ext_fp(limits->max_sysfs_pct));
+	limits->max_perf = max(min_policy_perf, limits->max_perf);

-	/* Make sure min_perf_pct <= max_perf_pct */
-	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+	/* Make sure min_perf <= max_perf */
+	limits->min_perf = min(limits->min_perf, limits->max_perf);

-	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+	limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+	limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
	scatterwalk_done(&walk, out, 0);
 }

-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}

-	/* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock hold. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
	dev->req->base.complete(&dev->req->base, err);
	dev->busy = false;
 }
@@ -368,51 +371,44 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 }

 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_outdata()).
 */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-	int err = 0;
-	bool ret = false;
+	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
-		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
-	} else {
-		s5p_aes_complete(dev, err);
-
-		dev->busy = true;
-		tasklet_schedule(&dev->tasklet);
+		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+		if (!ret)
+			ret = 1;
	}

	return ret;
 }

 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_indata()).
 */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
 {
-	int err;
-	bool ret = false;
+	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
-		err = s5p_set_indata(dev, sg_next(dev->sg_src));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
+		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+		if (!ret)
+			ret = 1;
	}

	return ret;
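
Both helpers now report three outcomes instead of a bool: a negative errno, 0 for "scatterlist exhausted", or 1 for "next buffer mapped, program the DMA engine". A minimal sketch of a caller consuming that convention (names illustrative):

/* Sketch of the tri-state 0 / 1 / -errno return convention. */
#include <errno.h>
#include <stdio.h>

static int map_next(int have_next, int mapping_fails)
{
	if (!have_next)
		return 0;		/* scatterlist exhausted */
	if (mapping_fails)
		return -ENOMEM;		/* propagate the error */
	return 1;			/* caller must start the next DMA block */
}

int main(void)
{
	int ret = map_next(1, 0);

	if (ret < 0)
		printf("error: %d\n", ret);
	else if (ret == 1)
		printf("program DMA for next block\n");
	else
		printf("transfer complete\n");
	return 0;
}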
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-	bool set_dma_tx = false;
-	bool set_dma_rx = false;
+	int err_dma_tx = 0;
+	int err_dma_rx = 0;
+	bool tx_end = false;
	unsigned long flags;
	uint32_t status;
+	int err;

	spin_lock_irqsave(&dev->lock, flags);

+	/*
+	 * Handle rx or tx interrupt. If there is still data (scatterlist did not
+	 * reach end), then map next scatterlist entry.
+	 * In case of such mapping error, s5p_aes_complete() should be called.
+	 *
+	 * If there is no more data in tx scatter list, call s5p_aes_complete()
+	 * and schedule new tasklet.
+	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
-		set_dma_rx = s5p_aes_rx(dev);
-	if (status & SSS_FCINTSTAT_BTDMAINT)
-		set_dma_tx = s5p_aes_tx(dev);
+		err_dma_rx = s5p_aes_rx(dev);
+
+	if (status & SSS_FCINTSTAT_BTDMAINT) {
+		if (sg_is_last(dev->sg_dst))
+			tx_end = true;
+		err_dma_tx = s5p_aes_tx(dev);
+	}

	SSS_WRITE(dev, FCINTPEND, status);

-	/*
-	 * Writing length of DMA block (either receiving or transmitting)
-	 * will start the operation immediately, so this should be done
-	 * at the end (even after clearing pending interrupts to not miss the
-	 * interrupt).
-	 */
-	if (set_dma_tx)
-		s5p_set_dma_outdata(dev, dev->sg_dst);
-	if (set_dma_rx)
-		s5p_set_dma_indata(dev, dev->sg_src);
+	if (err_dma_rx < 0) {
+		err = err_dma_rx;
+		goto error;
+	}
+	if (err_dma_tx < 0) {
+		err = err_dma_tx;
+		goto error;
+	}
+
+	if (tx_end) {
+		s5p_sg_done(dev);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		s5p_aes_complete(dev, 0);
+		dev->busy = true;
+		tasklet_schedule(&dev->tasklet);
+	} else {
+		/*
+		 * Writing length of DMA block (either receiving or
+		 * transmitting) will start the operation immediately, so this
+		 * should be done at the end (even after clearing pending
+		 * interrupts to not miss the interrupt).
+		 */
+		if (err_dma_tx == 1)
+			s5p_set_dma_outdata(dev, dev->sg_dst);
+		if (err_dma_rx == 1)
+			s5p_set_dma_indata(dev, dev->sg_src);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}

	return IRQ_HANDLED;

+error:
+	s5p_sg_done(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
+
+	return IRQ_HANDLED;
 }
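
The rework also changes where the completion runs: s5p_aes_complete() can re-enter the driver, so it is now called only after the spinlock is dropped, with s5p_sg_done() doing the under-lock cleanup. A small pthread-based sketch of that ordering rule (a user-space stand-in, not the driver's actual locking primitives):

/* Sketch: finish state teardown under the lock, call back without it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void completion(int err)
{
	/* may take "lock" itself, so must run without it held */
	printf("completed with %d\n", err);
}

static void finish_request(int err)
{
	pthread_mutex_lock(&lock);
	/* ... tear down per-request state under the lock ... */
	pthread_mutex_unlock(&lock);

	completion(err);	/* only after the lock is released */
}

int main(void)
{
	finish_request(0);
	return 0;
}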
@@ -597,8 +633,9 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
	s5p_unset_indata(dev);

 indata_error:
-	s5p_aes_complete(dev, err);
+	s5p_sg_done(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
 }

 static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
-	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
+	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+					s5p_aes_interrupt, IRQF_ONESHOT,
+					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;
+	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;
	}

+	if (fault_size != dax_region->align)
+		return VM_FAULT_SIGBUS;
+
	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
-		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
			vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}
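
The PTE path rejects any fault whose size differs from the region alignment; the PMD and PUD paths below refine this into reject-smaller, fall-back-larger. A sketch of that policy as a pure function, with illustrative constants:

/* Sketch of the fault-size vs. region-alignment policy. */
#include <stdio.h>

enum fault_rc { RC_OK, RC_SIGBUS, RC_FALLBACK };

static enum fault_rc check_fault_size(unsigned long fault_size,
				      unsigned long region_align)
{
	if (fault_size < region_align)
		return RC_SIGBUS;	/* region demands larger mappings */
	if (fault_size > region_align)
		return RC_FALLBACK;	/* retry with a smaller page size */
	return RC_OK;			/* exact match: handle the fault */
}

int main(void)
{
	printf("%d %d %d\n",
	       check_fault_size(4096, 2097152),	/* PTE fault, 2M region */
	       check_fault_size(2097152, 4096),	/* PMD fault, 4K region */
	       check_fault_size(4096, 4096));	/* matching sizes */
	return 0;
}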
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
+	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;
	}

+	if (fault_size < dax_region->align)
+		return VM_FAULT_SIGBUS;
+	else if (fault_size > dax_region->align)
+		return VM_FAULT_FALLBACK;
+
+	/* if we are outside of the VMA */
+	if (pmd_addr < vmf->vma->vm_start ||
+	    (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
	if (phys == -1) {
-		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
			pgoff);
		return VM_FAULT_SIGBUS;
	}
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
+	unsigned int fault_size = PUD_SIZE;
+

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;
	}

+	if (fault_size < dax_region->align)
+		return VM_FAULT_SIGBUS;
+	else if (fault_size > dax_region->align)
+		return VM_FAULT_FALLBACK;
+
+	/* if we are outside of the VMA */
+	if (pud_addr < vmf->vma->vm_start ||
+	    (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
	if (phys == -1) {
-		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
			pgoff);
		return VM_FAULT_SIGBUS;
	}
@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
	gpio->regmap = a10sr->regmap;

	gpio->gp = altr_a10sr_gc;
-
	gpio->gp.parent = pdev->dev.parent;
+	gpio->gp.of_node = pdev->dev.of_node;

	ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
Some files were not shown because too many files have changed in this diff.