commit 9b0dd49e35

Merge 4.8-rc5 into usb-testing

We want the USB fixes in here for testing and merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
.mailmap | 2
@@ -158,6 +158,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
@@ -1,7 +1,7 @@
# Note: This documents additional properties of any device beyond what
# is documented in Documentation/sysfs-rules.txt

What: /sys/devices/*/of_path
What: /sys/devices/*/of_node
Date: February 2015
Contact: Device Tree mailing list <devicetree@vger.kernel.org>
Description:
@@ -94,14 +94,11 @@ has a requirements for a minimum number of vectors the driver can pass a
min_vecs argument set to this limit, and the PCI core will return -ENOSPC
if it can't meet the minimum number of vectors.

The flags argument should normally be set to 0, but can be used to pass the
PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
case the device does not support legacy interrupt lines.

By default this function will spread the interrupts around the available
CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
flag.
The flags argument is used to specify which type of interrupt can be used
by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
any possible kind of interrupt. If the PCI_IRQ_AFFINITY flag is set,
pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.

To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
vectors, use the following function:
@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
capped to the supported limit, so there is no need to query the number of
vectors supported beforehand:

	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES)
	if (nvec < 0)
		goto out_err;

@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
number to pci_alloc_irq_vectors() function as both 'min_vecs' and
'max_vecs' parameters:

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		goto out_err;

@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
the single MSI mode for a device. It could be done by passing two 1s as
'min_vecs' and 'max_vecs':

	ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		goto out_err;

Some devices might not support using legacy line interrupts, in which case
the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
can't provide MSI or MSI-X interrupts:
the driver can specify that only MSI or MSI-X is acceptable:

	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (nvec < 0)
		goto out_err;
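The function referred to at the end of the first hunk above is pci_irq_vector(), which maps a vector index to the Linux IRQ number passed to request_irq()/free_irq(). A minimal sketch of the allocation pattern described here, assuming a hypothetical "foo" driver (the handler name, device name and the vector count of 8 are illustrative only):

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static irqreturn_t foo_irq_handler(int irq, void *dev_id);	/* assumed to exist */

	static int foo_setup_irqs(struct pci_dev *pdev)
	{
		int i, err, nvec;

		/* accept MSI-X, MSI or legacy; at least 1, at most 8 vectors */
		nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* pci_irq_vector() returns the Linux IRQ number for vector i */
			err = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
					  0, "foo", pdev);
			if (err)
				goto out_free;
		}
		return 0;

	out_free:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), pdev);
		pci_free_irq_vectors(pdev);
		return err;
	}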
@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver

The ID table is an array of struct pci_device_id entries ending with an
all-zero entry. Definitions with static const are generally preferred.
Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.

Each entry consists of:

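A minimal sketch of such an ID table, using the standard PCI_DEVICE() and MODULE_DEVICE_TABLE() helpers (the vendor and device IDs are made up for illustration):

	#include <linux/module.h>
	#include <linux/pci.h>

	#define FOO_VENDOR_ID	0x1234	/* hypothetical */
	#define FOO_DEVICE_ID	0x5678	/* hypothetical */

	/* static const table, terminated by an all-zero sentinel entry */
	static const struct pci_device_id foo_ids[] = {
		{ PCI_DEVICE(FOO_VENDOR_ID, FOO_DEVICE_ID) },
		{ }
	};
	MODULE_DEVICE_TABLE(pci, foo_ids);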
@@ -53,6 +53,7 @@ stable kernels.
| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
| ARM            | Cortex-A57      | #852523         | N/A                     |
| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
| ARM            | Cortex-A72      | #853709         | N/A                     |
| ARM            | MMU-500         | #841119,#826419 | N/A                     |
|                |                 |                 |                         |
| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
@@ -16,6 +16,11 @@ Required properties:
- vref-supply: The regulator supply ADC reference voltage.
- #io-channel-cells: Should be 1, see ../iio-bindings.txt

Optional properties:
- resets: Must contain an entry for each entry in reset-names if need support
  this option. See ../reset/reset.txt for details.
- reset-names: Must include the name "saradc-apb".

Example:
	saradc: saradc@2006c000 {
		compatible = "rockchip,saradc";
@@ -23,6 +28,8 @@ Example:
		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
		clock-names = "saradc", "apb_pclk";
		resets = <&cru SRST_SARADC>;
		reset-names = "saradc-apb";
		#io-channel-cells = <1>;
		vref-supply = <&vcc18>;
	};
@@ -42,9 +42,6 @@ Optional properties:
- auto-flow-control: one way to enable automatic flow control support. The
  driver is allowed to detect support for the capability even without this
  property.
- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
  line respectively. It will use specified GPIO instead of the peripheral
  function pin for the UART feature. If unsure, don't specify this property.

Note:
* fsl,ns16550:
@@ -66,19 +63,3 @@ Example:
		interrupts = <10>;
		reg-shift = <2>;
	};

Example for OMAP UART using GPIO-based modem control signals:

	uart4: serial@49042000 {
		compatible = "ti,omap3-uart";
		reg = <0x49042000 0x400>;
		interrupts = <80>;
		ti,hwmods = "uart4";
		clock-frequency = <48000000>;
		cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
		rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
		dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
		dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
		dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
		rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
	};
@@ -8,8 +8,6 @@ Required properties:
- interrupts: Interrupt number for McPDM
- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated to the McPDM
- clocks: phandle for the pdmclk provider, likely <&twl6040>
- clock-names: Must be "pdmclk"

Example:

@@ -21,11 +19,3 @@ mcpdm: mcpdm@40132000 {
	interrupt-parent = <&gic>;
	ti,hwmods = "mcpdm";
};

In board DTS file the pdmclk needs to be added:

&mcpdm {
	clocks = <&twl6040>;
	clock-names = "pdmclk";
	status = "okay";
};
@@ -62,7 +62,7 @@ For more examples of cooling devices, refer to the example sections below.
Required properties:
- #cooling-cells: Used to provide cooling device specific information
  Type: unsigned while referring to it. Must be at least 2, in order
  Size: one cell to specify minimum and maximum cooling state used
  Size: one cell to specify minimum and maximum cooling state used
  in the reference. The first cell is the minimum
  cooling state requested and the second cell is
  the maximum cooling state requested in the reference.
@@ -119,7 +119,7 @@ Required properties:
Optional property:
- contribution: The cooling contribution to the thermal zone of the
  Type: unsigned referred cooling device at the referred trip point.
  Size: one cell The contribution is a ratio of the sum
  Size: one cell The contribution is a ratio of the sum
  of all cooling contributions within a thermal zone.

Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle
@@ -145,7 +145,7 @@ Required properties:
  Size: one cell

- thermal-sensors: A list of thermal sensor phandles and sensor specifier
  Type: list of used while monitoring the thermal zone.
  Type: list of used while monitoring the thermal zone.
  phandles + sensor
  specifier

@@ -473,7 +473,7 @@ thermal-zones {
				<&adc>;	/* pcb north */

			/* hotspot = 100 * bandgap - 120 * adc + 484 */
			coefficients = <100 -120 484>;
			coefficients = <100 -120 484>;

		trips {
			...
@@ -502,7 +502,7 @@ from the ADC sensor. The binding would be then:
		thermal-sensors = <&adc>;

		/* hotspot = 1 * adc + 6000 */
		coefficients = <1 6000>;
		coefficients = <1 6000>;

	(d) - Board thermal
@@ -183,12 +183,10 @@ The copy_up operation essentially creates a new, identical file and
moves it over to the old name. The new file may be on a different
filesystem, so both st_dev and st_ino of the file may change.

Any open files referring to this inode will access the old data and
metadata. Similarly any file locks obtained before copy_up will not
apply to the copied up file.
Any open files referring to this inode will access the old data.

On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and
fsetxattr(2) will fail with EROFS.
Any file locks (and leases) obtained before copy_up will not apply
to the copied up file.

If a file with multiple hard links is copied up, then this will
"break" the link. Changes will not be propagated to other names
@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			PAGE_SIZE is used as alignment.
			PCI-PCI bridge can be specified, if resource
			windows need to be expanded.
			To specify the alignment for several
			instances of a device, the PCI vendor,
			device, subvendor, and subdevice may be
			specified, e.g., 4096@pci:8086:9c22:103c:198f
	ecrc=		Enable/disable PCIe ECRC (transaction layer
			end-to-end CRC checking).
		bios: Use BIOS/firmware settings. This is the
@@ -587,26 +587,6 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device.
TODO
====

The platform device problem
---------------------------
DSA is currently implemented as a platform device driver which is far from ideal
as was discussed in this thread:

http://permalink.gmane.org/gmane.linux.network/329848

This basically prevents the device driver model to be properly used and applied,
and support non-MDIO, non-MMIO Ethernet connected switches.

Another problem with the platform device driver approach is that it prevents the
use of a modular switch drivers build due to a circular dependency, illustrated
here:

http://comments.gmane.org/gmane.linux.network/345803

Attempts of reworking this has been done here:

https://lwn.net/Articles/643149/

Making SWITCHDEV and DSA converge towards an unified codebase
-------------------------------------------------------------
@@ -167,6 +167,8 @@ signal will be rolled back anyway.
For signals taken in non-TM or suspended mode, we use the
normal/non-checkpointed stack pointer.

Any transaction initiated inside a sighandler and suspended on return
from the sighandler to the kernel will get reclaimed and discarded.

Failure cause codes used by kernel
==================================
@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:

III. Module parameters

- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
  This parameter set a maximum completion wait time for SYNC mode DMA
  transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.

- 'dbg_level' - This parameter allows to control amount of debug information
  generated by this device driver. This parameter is formed by set of
  bit masks that correspond to the specific functional blocks.
MAINTAINERS | 22
@@ -798,6 +798,7 @@ M: Laura Abbott <labbott@redhat.com>
M: Sumit Semwal <sumit.semwal@linaro.org>
L: devel@driverdev.osuosl.org
S: Supported
F: Documentation/devicetree/bindings/staging/ion/
F: drivers/staging/android/ion
F: drivers/staging/android/uapi/ion.h
F: drivers/staging/android/uapi/ion_test.h
@@ -881,6 +882,15 @@ S: Supported
F: drivers/gpu/drm/arc/
F: Documentation/devicetree/bindings/display/snps,arcpgu.txt

ARM ARCHITECTED TIMER DRIVER
M: Mark Rutland <mark.rutland@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/include/asm/arch_timer.h
F: arch/arm64/include/asm/arch_timer.h
F: drivers/clocksource/arm_arch_timer.c

ARM HDLCD DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
@@ -3238,7 +3248,7 @@ F: kernel/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
M: Vladimir Davydov <vdavydov@virtuozzo.com>
M: Vladimir Davydov <vdavydov.dev@gmail.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@@ -7661,7 +7671,7 @@ L: linux-rdma@vger.kernel.org
S: Supported
W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/hw/rxe/
F: drivers/infiniband/sw/rxe/
F: include/uapi/rdma/rdma_user_rxe.h

MEMBARRIER SUPPORT
@@ -11223,12 +11233,8 @@ S: Odd Fixes
F: drivers/staging/vt665?/

STAGING - WILC1000 WIFI DRIVER
M: Johnny Kim <johnny.kim@atmel.com>
M: Austin Shin <austin.shin@atmel.com>
M: Chris Park <chris.park@atmel.com>
M: Tony Cho <tony.cho@atmel.com>
M: Glen Lee <glen.lee@atmel.com>
M: Leo Kim <leo.kim@atmel.com>
M: Aditya Shankar <aditya.shankar@microchip.com>
M: Ganesh Krishna <ganesh.krishna@microchip.com>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/staging/wilc1000/
Makefile | 2
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Psychotic Stoned Sheep

# *DOCUMENTATION*
@ -142,7 +142,7 @@
|
||||
|
||||
#ifdef CONFIG_ARC_CURR_IN_REG
|
||||
; Retrieve orig r25 and save it with rest of callee_regs
|
||||
ld.as r12, [r12, PT_user_r25]
|
||||
ld r12, [r12, PT_user_r25]
|
||||
PUSH r12
|
||||
#else
|
||||
PUSH r25
|
||||
@ -198,7 +198,7 @@
|
||||
|
||||
; SP is back to start of pt_regs
|
||||
#ifdef CONFIG_ARC_CURR_IN_REG
|
||||
st.as r12, [sp, PT_user_r25]
|
||||
st r12, [sp, PT_user_r25]
|
||||
#endif
|
||||
.endm
|
||||
|
||||
|
@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
|
||||
.endm
|
||||
|
||||
.macro IRQ_ENABLE scratch
|
||||
TRACE_ASM_IRQ_ENABLE
|
||||
lr \scratch, [status32]
|
||||
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
|
||||
flag \scratch
|
||||
TRACE_ASM_IRQ_ENABLE
|
||||
.endm
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
|
||||
|
||||
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
|
||||
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
|
||||
#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
|
||||
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
|
||||
|
||||
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
|
||||
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
|
||||
|
@ -13,8 +13,15 @@
|
||||
|
||||
/* Machine specific ELF Hdr flags */
|
||||
#define EF_ARC_OSABI_MSK 0x00000f00
|
||||
#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
|
||||
#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
|
||||
|
||||
#define EF_ARC_OSABI_V3 0x00000300 /* v3 (no legacy syscalls) */
|
||||
#define EF_ARC_OSABI_V4 0x00000400 /* v4 (64bit data any reg align) */
|
||||
|
||||
#if __GNUC__ < 6
|
||||
#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3
|
||||
#else
|
||||
#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4
|
||||
#endif
|
||||
|
||||
typedef unsigned long elf_greg_t;
|
||||
typedef unsigned long elf_fpregset_t;
|
||||
|
@ -28,6 +28,7 @@ extern void __muldf3(void);
|
||||
extern void __divdf3(void);
|
||||
extern void __floatunsidf(void);
|
||||
extern void __floatunsisf(void);
|
||||
extern void __udivdi3(void);
|
||||
|
||||
EXPORT_SYMBOL(__ashldi3);
|
||||
EXPORT_SYMBOL(__ashrdi3);
|
||||
@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
|
||||
EXPORT_SYMBOL(__divdf3);
|
||||
EXPORT_SYMBOL(__floatunsidf);
|
||||
EXPORT_SYMBOL(__floatunsisf);
|
||||
EXPORT_SYMBOL(__udivdi3);
|
||||
|
||||
/* ARC optimised assembler routines */
|
||||
EXPORT_SYMBOL(memset);
|
||||
|
@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
|
||||
}
|
||||
|
||||
eflags = x->e_flags;
|
||||
if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
|
||||
if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
|
||||
pr_err("ABI mismatch - you need newer toolchain\n");
|
||||
force_sigsegv(SIGSEGV, current);
|
||||
return 0;
|
||||
|
@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
|
||||
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
|
||||
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
|
||||
|
||||
n += scnprintf(buf + n, len - n,
|
||||
"OS ABI [v3]\t: no-legacy-syscalls\n");
|
||||
n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
|
||||
EF_ARC_OSABI_CURRENT >> 8,
|
||||
EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
|
||||
"no-legacy-syscalls" : "64-bit data any register aligned");
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
@ -921,6 +921,15 @@ void arc_cache_init(void)
|
||||
|
||||
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
|
||||
|
||||
/*
|
||||
* Only master CPU needs to execute rest of function:
|
||||
* - Assume SMP so all cores will have same cache config so
|
||||
* any geomtry checks will be same for all
|
||||
* - IOC setup / dma callbacks only need to be setup once
|
||||
*/
|
||||
if (cpu)
|
||||
return;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
|
||||
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
|
||||
|
||||
|
@ -61,6 +61,7 @@ void *kmap(struct page *page)
|
||||
|
||||
return kmap_high(page);
|
||||
}
|
||||
EXPORT_SYMBOL(kmap);
|
||||
|
||||
void *kmap_atomic(struct page *page)
|
||||
{
|
||||
|
@ -197,6 +197,8 @@ tsadc: tsadc@20060000 {
|
||||
clock-names = "saradc", "apb_pclk";
|
||||
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#io-channel-cells = <1>;
|
||||
resets = <&cru SRST_SARADC>;
|
||||
reset-names = "saradc-apb";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -279,6 +279,8 @@ saradc: saradc@ff100000 {
|
||||
#io-channel-cells = <1>;
|
||||
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
|
||||
clock-names = "saradc", "apb_pclk";
|
||||
resets = <&cru SRST_SARADC>;
|
||||
reset-names = "saradc-apb";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -399,6 +399,8 @@ saradc: saradc@2006c000 {
|
||||
#io-channel-cells = <1>;
|
||||
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
|
||||
clock-names = "saradc", "apb_pclk";
|
||||
resets = <&cru SRST_SARADC>;
|
||||
reset-names = "saradc-apb";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
smp_rmb();
|
||||
|
||||
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
|
||||
if (is_error_pfn(pfn))
|
||||
if (is_error_noslot_pfn(pfn))
|
||||
return -EFAULT;
|
||||
|
||||
if (kvm_is_device_pfn(pfn)) {
|
||||
|
@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
|
||||
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
|
||||
SMC91X_USE_DMA | SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
|
@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
|
||||
};
|
||||
|
||||
static struct smc91x_platdata xcep_smc91x_info = {
|
||||
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
|
||||
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
|
||||
SMC91X_NOWAIT | SMC91X_USE_DMA,
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
|
@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
|
||||
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
|
||||
SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device realview_eth_device = {
|
||||
|
@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
|
||||
};
|
||||
|
||||
static struct smc91x_platdata smc91x_platdata = {
|
||||
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
|
||||
.flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
|
||||
};
|
||||
|
||||
static struct platform_device smc91x_device = {
|
||||
|
@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
|
||||
static struct vcpu_info __percpu *xen_vcpu_info;
|
||||
|
||||
/* Linux <-> Xen vCPU id mapping */
|
||||
DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
|
||||
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
|
||||
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
|
||||
|
||||
/* These are unused until we support booting "pre-ballooned" */
|
||||
|
@ -270,6 +270,8 @@ saradc: saradc@ff100000 {
|
||||
#io-channel-cells = <1>;
|
||||
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
|
||||
clock-names = "saradc", "apb_pclk";
|
||||
resets = <&cru SRST_SARADC>;
|
||||
reset-names = "saradc-apb";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
|
||||
isb
|
||||
bl __create_page_tables // recreate kernel mapping
|
||||
|
||||
tlbi vmalle1 // Remove any stale TLB entries
|
||||
dsb nsh
|
||||
|
||||
msr sctlr_el1, x19 // re-enable the MMU
|
||||
isb
|
||||
ic iallu // flush instructions fetched
|
||||
|
@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
|
||||
|
||||
/*
|
||||
* We must restore the 32-bit state before the sysregs, thanks
|
||||
* to Cortex-A57 erratum #852523.
|
||||
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
|
||||
*/
|
||||
__sysreg32_restore_state(vcpu);
|
||||
__sysreg_restore_guest_state(guest_ctxt);
|
||||
|
@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
* Architected system registers.
|
||||
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
|
||||
*
|
||||
* We could trap ID_DFR0 and tell the guest we don't support performance
|
||||
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
|
||||
* NAKed, so it will read the PMCR anyway.
|
||||
*
|
||||
* Therefore we tell the guest we have 0 counters. Unfortunately, we
|
||||
* must always support PMCCNTR (the cycle counter): we just RAZ/WI for
|
||||
* all PM registers, which doesn't crash the guest kernel at least.
|
||||
*
|
||||
* Debug handling: We do trap most, if not all debug related system
|
||||
* registers. The implementation is good enough to ensure that a guest
|
||||
* can use these with minimal performance degradation. The drawback is
|
||||
@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
|
||||
{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
|
||||
|
||||
/* ICC_SRE */
|
||||
{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
|
||||
{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
|
||||
|
||||
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
|
||||
|
||||
|
@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
|
||||
|
||||
msr tcr_el1, x8
|
||||
msr vbar_el1, x9
|
||||
|
||||
/*
|
||||
* __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
|
||||
* debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
|
||||
* exception. Mask them until local_dbg_restore() in cpu_suspend()
|
||||
* resets them.
|
||||
*/
|
||||
disable_dbg
|
||||
msr mdscr_el1, x10
|
||||
|
||||
msr sctlr_el1, x12
|
||||
/*
|
||||
* Restore oslsr_el1 by writing oslar_el1
|
||||
|
@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
|
||||
#include <linux/smc91x.h>
|
||||
|
||||
static struct smc91x_platdata smc91x_info = {
|
||||
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
|
||||
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
|
||||
SMC91X_NOWAIT,
|
||||
.leda = RPC_LED_100_10,
|
||||
.ledb = RPC_LED_TX_RX,
|
||||
};
|
||||
|
@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
|
||||
#include <linux/smc91x.h>
|
||||
|
||||
static struct smc91x_platdata smc91x_info = {
|
||||
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
|
||||
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
|
||||
SMC91X_NOWAIT,
|
||||
.leda = RPC_LED_100_10,
|
||||
.ledb = RPC_LED_TX_RX,
|
||||
};
|
||||
|
@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
|
||||
*/
|
||||
static inline unsigned long ___pa(unsigned long x)
|
||||
{
|
||||
if (config_enabled(CONFIG_64BIT)) {
|
||||
if (IS_ENABLED(CONFIG_64BIT)) {
|
||||
/*
|
||||
* For MIPS64 the virtual address may either be in one of
|
||||
* the compatibility segements ckseg0 or ckseg1, or it may
|
||||
@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
|
||||
return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
|
||||
}
|
||||
|
||||
if (!config_enabled(CONFIG_EVA)) {
|
||||
if (!IS_ENABLED(CONFIG_EVA)) {
|
||||
/*
|
||||
* We're using the standard MIPS32 legacy memory map, ie.
|
||||
* the address x is going to be in kseg0 or kseg1. We can
|
||||
|
@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
pfn = gfn_to_pfn(kvm, gfn);
|
||||
|
||||
if (is_error_pfn(pfn)) {
|
||||
if (is_error_noslot_pfn(pfn)) {
|
||||
kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
|
||||
err = -EFAULT;
|
||||
goto out;
|
||||
|
@ -1,6 +1,5 @@
|
||||
config PARISC
|
||||
def_bool y
|
||||
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
select ARCH_MIGHT_HAVE_PC_PARPORT
|
||||
select HAVE_IDE
|
||||
select HAVE_OPROFILE
|
||||
|
@ -245,7 +245,6 @@ CONFIG_DEBUG_RT_MUTEXES=y
|
||||
CONFIG_PROVE_RCU_DELAY=y
|
||||
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_KEYS=y
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
CONFIG_FONTS=y
|
||||
|
@ -291,7 +291,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
|
||||
CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_CRYPTO_MANAGER=y
|
||||
CONFIG_CRYPTO_ECB=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
|
@ -208,13 +208,13 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
|
||||
#define __copy_to_user_inatomic __copy_to_user
|
||||
#define __copy_from_user_inatomic __copy_from_user
|
||||
|
||||
extern void copy_from_user_overflow(void)
|
||||
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
__compiletime_error("copy_from_user() buffer size is not provably correct")
|
||||
#else
|
||||
__compiletime_warning("copy_from_user() buffer size is not provably correct")
|
||||
#endif
|
||||
;
|
||||
extern void __compiletime_error("usercopy buffer size is too small")
|
||||
__bad_copy_user(void);
|
||||
|
||||
static inline void copy_user_overflow(int size, unsigned long count)
|
||||
{
|
||||
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
|
||||
}
|
||||
|
||||
static inline unsigned long __must_check copy_from_user(void *to,
|
||||
const void __user *from,
|
||||
@ -223,10 +223,12 @@ static inline unsigned long __must_check copy_from_user(void *to,
|
||||
int sz = __compiletime_object_size(to);
|
||||
int ret = -EFAULT;
|
||||
|
||||
if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
|
||||
if (likely(sz == -1 || sz >= n))
|
||||
ret = __copy_from_user(to, from, n);
|
||||
else
|
||||
copy_from_user_overflow();
|
||||
else if (!__builtin_constant_p(n))
|
||||
copy_user_overflow(sz, n);
|
||||
else
|
||||
__bad_copy_user();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
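The uaccess hunk above (and the matching s390, tile and x86 hunks later in this commit) all switch to the same check order: copy when the destination object size is provably large enough, warn at runtime when the length is not a compile-time constant, and break the build otherwise. A condensed sketch of that logic, reusing the helpers introduced in the hunk (the wrapper name is illustrative):

	static inline unsigned long check_and_copy(void *to, const void __user *from,
						   unsigned long n)
	{
		int sz = __compiletime_object_size(to);	/* -1 when unknown */

		if (likely(sz == -1 || sz >= n))
			return __copy_from_user(to, from, n);	/* size provably OK */

		if (!__builtin_constant_p(n))
			copy_user_overflow(sz, n);	/* runtime WARN, nothing copied */
		else
			__bad_copy_user();		/* constant overflow: build error */

		return n;				/* report nothing copied */
	}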
@ -3,6 +3,7 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/cpumask.h>
|
||||
#include <asm/cpu_has_feature.h>
|
||||
|
||||
/*
|
||||
* Mapping of threads to cores
|
||||
|
@ -21,7 +21,7 @@
|
||||
#ifndef __ASM_PPC64_HMI_H__
|
||||
#define __ASM_PPC64_HMI_H__
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
|
||||
#define CORE_TB_RESYNC_REQ_BIT 63
|
||||
#define MAX_SUBCORE_PER_CORE 4
|
||||
|
@ -183,11 +183,6 @@ struct paca_struct {
|
||||
*/
|
||||
u16 in_mce;
|
||||
u8 hmi_event_available; /* HMI event is available */
|
||||
/*
|
||||
* Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
|
||||
* more details
|
||||
*/
|
||||
struct sibling_subcore_state *sibling_subcore_state;
|
||||
#endif
|
||||
|
||||
/* Stuff for accurate time accounting */
|
||||
@ -202,6 +197,13 @@ struct paca_struct {
|
||||
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
|
||||
#endif
|
||||
struct kvmppc_host_state kvm_hstate;
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
/*
|
||||
* Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
|
||||
* more details
|
||||
*/
|
||||
struct sibling_subcore_state *sibling_subcore_state;
|
||||
#endif
|
||||
#endif
|
||||
};
|
||||
|
||||
|
@ -301,6 +301,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
|
||||
/* Allocate & free a PCI host bridge structure */
|
||||
extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
|
||||
extern void pcibios_free_controller(struct pci_controller *phb);
|
||||
extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
extern int pcibios_vaddr_is_ioport(void __iomem *address);
|
||||
|
@ -41,7 +41,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
|
||||
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
|
||||
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
|
||||
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
|
||||
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o hmi.o
|
||||
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
|
||||
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
|
||||
obj-$(CONFIG_PPC64) += vdso64/
|
||||
obj-$(CONFIG_ALTIVEC) += vecemu.o
|
||||
|
@ -368,13 +368,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
||||
tabort_syscall:
|
||||
/* Firstly we need to enable TM in the kernel */
|
||||
mfmsr r10
|
||||
li r13, 1
|
||||
rldimi r10, r13, MSR_TM_LG, 63-MSR_TM_LG
|
||||
li r9, 1
|
||||
rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
|
||||
mtmsrd r10, 0
|
||||
|
||||
/* tabort, this dooms the transaction, nothing else */
|
||||
li r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
|
||||
TABORT(R13)
|
||||
li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
|
||||
TABORT(R9)
|
||||
|
||||
/*
|
||||
* Return directly to userspace. We have corrupted user register state,
|
||||
@ -382,8 +382,8 @@ tabort_syscall:
|
||||
* resume after the tbegin of the aborted transaction with the
|
||||
* checkpointed register state.
|
||||
*/
|
||||
li r13, MSR_RI
|
||||
andc r10, r10, r13
|
||||
li r9, MSR_RI
|
||||
andc r10, r10, r9
|
||||
mtmsrd r10, 1
|
||||
mtspr SPRN_SRR0, r11
|
||||
mtspr SPRN_SRR1, r12
|
||||
|
@ -485,7 +485,23 @@ machine_check_fwnmi:
|
||||
EXCEPTION_PROLOG_0(PACA_EXMC)
|
||||
machine_check_pSeries_0:
|
||||
EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
|
||||
EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
|
||||
/*
|
||||
* The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
|
||||
* difference that MSR_RI is not enabled, because PACA_EXMC is being
|
||||
* used, so nested machine check corrupts it. machine_check_common
|
||||
* enables MSR_RI.
|
||||
*/
|
||||
ld r12,PACAKBASE(r13)
|
||||
ld r10,PACAKMSR(r13)
|
||||
xori r10,r10,MSR_RI
|
||||
mfspr r11,SPRN_SRR0
|
||||
LOAD_HANDLER(r12, machine_check_common)
|
||||
mtspr SPRN_SRR0,r12
|
||||
mfspr r12,SPRN_SRR1
|
||||
mtspr SPRN_SRR1,r10
|
||||
rfid
|
||||
b . /* prevent speculative execution */
|
||||
|
||||
KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
|
||||
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
|
||||
KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
|
||||
@ -969,14 +985,17 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
|
||||
machine_check_common:
|
||||
|
||||
mfspr r10,SPRN_DAR
|
||||
std r10,PACA_EXGEN+EX_DAR(r13)
|
||||
std r10,PACA_EXMC+EX_DAR(r13)
|
||||
mfspr r10,SPRN_DSISR
|
||||
stw r10,PACA_EXGEN+EX_DSISR(r13)
|
||||
stw r10,PACA_EXMC+EX_DSISR(r13)
|
||||
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
|
||||
FINISH_NAP
|
||||
RECONCILE_IRQ_STATE(r10, r11)
|
||||
ld r3,PACA_EXGEN+EX_DAR(r13)
|
||||
lwz r4,PACA_EXGEN+EX_DSISR(r13)
|
||||
ld r3,PACA_EXMC+EX_DAR(r13)
|
||||
lwz r4,PACA_EXMC+EX_DSISR(r13)
|
||||
/* Enable MSR_RI when finished with PACA_EXMC */
|
||||
li r10,MSR_RI
|
||||
mtmsrd r10,1
|
||||
std r3,_DAR(r1)
|
||||
std r4,_DSISR(r1)
|
||||
bl save_nvgprs
|
||||
|
@ -29,7 +29,7 @@
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/extable.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/code-patching.h>
|
||||
|
@ -153,6 +153,42 @@ void pcibios_free_controller(struct pci_controller *phb)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pcibios_free_controller);
|
||||
|
||||
/*
|
||||
* This function is used to call pcibios_free_controller()
|
||||
* in a deferred manner: a callback from the PCI subsystem.
|
||||
*
|
||||
* _*DO NOT*_ call pcibios_free_controller() explicitly if
|
||||
* this is used (or it may access an invalid *phb pointer).
|
||||
*
|
||||
* The callback occurs when all references to the root bus
|
||||
* are dropped (e.g., child buses/devices and their users).
|
||||
*
|
||||
* It's called as .release_fn() of 'struct pci_host_bridge'
|
||||
* which is associated with the 'struct pci_controller.bus'
|
||||
* (root bus) - it expects .release_data to hold a pointer
|
||||
* to 'struct pci_controller'.
|
||||
*
|
||||
* In order to use it, register .release_fn()/release_data
|
||||
* like this:
|
||||
*
|
||||
* pci_set_host_bridge_release(bridge,
|
||||
* pcibios_free_controller_deferred
|
||||
* (void *) phb);
|
||||
*
|
||||
* e.g. in the pcibios_root_bridge_prepare() callback from
|
||||
* pci_create_root_bus().
|
||||
*/
|
||||
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
|
||||
{
|
||||
struct pci_controller *phb = (struct pci_controller *)
|
||||
bridge->release_data;
|
||||
|
||||
pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
|
||||
|
||||
pcibios_free_controller(phb);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
|
||||
|
||||
/*
|
||||
* The function is used to return the minimal alignment
|
||||
* for memory or I/O windows of the associated P2P bridge.
|
||||
|
@ -695,7 +695,7 @@ unsigned char ibm_architecture_vec[] = {
|
||||
OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
|
||||
|
||||
/* option vector 5: PAPR/OF options */
|
||||
VECTOR_LENGTH(18), /* length */
|
||||
VECTOR_LENGTH(21), /* length */
|
||||
0, /* don't ignore, don't halt */
|
||||
OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
|
||||
OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
|
||||
@ -726,8 +726,11 @@ unsigned char ibm_architecture_vec[] = {
|
||||
0,
|
||||
0,
|
||||
OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
|
||||
OV5_FEAT(OV5_PFO_HW_842),
|
||||
OV5_FEAT(OV5_SUB_PROCESSORS),
|
||||
OV5_FEAT(OV5_PFO_HW_842), /* Byte 17 */
|
||||
0, /* Byte 18 */
|
||||
0, /* Byte 19 */
|
||||
0, /* Byte 20 */
|
||||
OV5_FEAT(OV5_SUB_PROCESSORS), /* Byte 21 */
|
||||
|
||||
/* option vector 6: IBM PAPR hints */
|
||||
VECTOR_LENGTH(3), /* length */
|
||||
|
@ -1226,7 +1226,21 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
|
||||
(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
|
||||
if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
|
||||
goto bad;
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
/*
|
||||
* If there is a transactional state then throw it away.
|
||||
* The purpose of a sigreturn is to destroy all traces of the
|
||||
* signal frame, this includes any transactional state created
|
||||
* within in. We only check for suspended as we can never be
|
||||
* active in the kernel, we are active, there is nothing better to
|
||||
* do than go ahead and Bad Thing later.
|
||||
* The cause is not important as there will never be a
|
||||
* recheckpoint so it's not user visible.
|
||||
*/
|
||||
if (MSR_TM_SUSPENDED(mfmsr()))
|
||||
tm_reclaim_current(0);
|
||||
|
||||
if (__get_user(tmp, &rt_sf->uc.uc_link))
|
||||
goto bad;
|
||||
uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
|
||||
|
@ -676,7 +676,21 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
|
||||
goto badframe;
|
||||
set_current_blocked(&set);
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
/*
|
||||
* If there is a transactional state then throw it away.
|
||||
* The purpose of a sigreturn is to destroy all traces of the
|
||||
* signal frame, this includes any transactional state created
|
||||
* within in. We only check for suspended as we can never be
|
||||
* active in the kernel, we are active, there is nothing better to
|
||||
* do than go ahead and Bad Thing later.
|
||||
* The cause is not important as there will never be a
|
||||
* recheckpoint so it's not user visible.
|
||||
*/
|
||||
if (MSR_TM_SUSPENDED(mfmsr()))
|
||||
tm_reclaim_current(0);
|
||||
|
||||
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
|
||||
goto badframe;
|
||||
if (MSR_TM_ACTIVE(msr)) {
|
||||
|
@ -830,7 +830,7 @@ int __cpu_disable(void)
|
||||
|
||||
/* Update sibling maps */
|
||||
base = cpu_first_thread_sibling(cpu);
|
||||
for (i = 0; i < threads_per_core; i++) {
|
||||
for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
|
||||
cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
|
||||
cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
|
||||
cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
|
||||
|
@ -25,7 +25,8 @@
|
||||
#include <linux/user.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/extable.h>
|
||||
#include <linux/module.h> /* print_modules */
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kprobes.h>
|
||||
|
@ -78,6 +78,7 @@ kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
|
||||
|
||||
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
|
||||
book3s_hv_hmi.o \
|
||||
book3s_hv_rmhandlers.o \
|
||||
book3s_hv_rm_mmu.o \
|
||||
book3s_hv_ras.o \
|
||||
|
@ -26,7 +26,7 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/extable.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/perf_event.h>
|
||||
|
@ -528,7 +528,6 @@ static struct platform_driver mpc512x_lpbfifo_driver = {
|
||||
.remove = mpc512x_lpbfifo_remove,
|
||||
.driver = {
|
||||
.name = DRV_NAME,
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = mpc512x_lpbfifo_match,
|
||||
},
|
||||
};
|
||||
|
@ -222,7 +222,6 @@ static const struct of_device_id mcu_of_match_table[] = {
|
||||
static struct i2c_driver mcu_driver = {
|
||||
.driver = {
|
||||
.name = "mcu-mpc8349emitx",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = mcu_of_match_table,
|
||||
},
|
||||
.probe = mcu_probe,
|
||||
|
@ -26,7 +26,7 @@
|
||||
#include <linux/tty.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/extable.h>
|
||||
|
||||
#include <asm/time.h>
|
||||
#include <asm/machdep.h>
|
||||
|
@ -23,7 +23,7 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/kdev_t.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/extable.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data)
|
||||
uint32_t dump_id, dump_size, dump_type;
|
||||
struct dump_obj *dump;
|
||||
char name[22];
|
||||
struct kobject *kobj;
|
||||
|
||||
rc = dump_read_info(&dump_id, &dump_size, &dump_type);
|
||||
if (rc != OPAL_SUCCESS)
|
||||
@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data)
|
||||
* that gracefully and not create two conflicting
|
||||
* entries.
|
||||
*/
|
||||
if (kset_find_obj(dump_kset, name))
|
||||
kobj = kset_find_obj(dump_kset, name);
|
||||
if (kobj) {
|
||||
/* Drop reference added by kset_find_obj() */
|
||||
kobject_put(kobj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
dump = create_dump_obj(dump_id, dump_size, dump_type);
|
||||
if (!dump)
|
||||
|
@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data)
|
||||
uint64_t elog_type;
|
||||
int rc;
|
||||
char name[2+16+1];
|
||||
struct kobject *kobj;
|
||||
|
||||
rc = opal_get_elog_size(&id, &size, &type);
|
||||
if (rc != OPAL_SUCCESS) {
|
||||
@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data)
|
||||
* that gracefully and not create two conflicting
|
||||
* entries.
|
||||
*/
|
||||
if (kset_find_obj(elog_kset, name))
|
||||
kobj = kset_find_obj(elog_kset, name);
|
||||
if (kobj) {
|
||||
/* Drop reference added by kset_find_obj() */
|
||||
kobject_put(kobj);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
create_elog_obj(log_id, elog_size, elog_type);
|
||||
|
||||
|
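The two OPAL hunks above (dump and elog) fix the same problem: kset_find_obj() returns its result with an extra reference held, which the caller must drop with kobject_put(). A minimal sketch of the corrected lookup pattern (the kset and the helper name are placeholders):

	#include <linux/kobject.h>

	/* Return true if an object with this name already exists in the kset,
	 * dropping the reference that kset_find_obj() took. */
	static bool entry_exists(struct kset *kset, const char *name)
	{
		struct kobject *kobj = kset_find_obj(kset, name);

		if (!kobj)
			return false;

		kobject_put(kobj);
		return true;
	}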
@ -149,7 +149,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
|
||||
|
||||
static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
|
||||
{
|
||||
unsigned long pe = phb->ioda.total_pe_num - 1;
|
||||
long pe;
|
||||
|
||||
for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
|
||||
if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
|
||||
|
@ -119,6 +119,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
|
||||
|
||||
bus = bridge->bus;
|
||||
|
||||
/* Rely on the pcibios_free_controller_deferred() callback. */
|
||||
pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
|
||||
(void *) pci_bus_to_host(bus));
|
||||
|
||||
dn = pcibios_get_phb_of_node(bus);
|
||||
if (!dn)
|
||||
return 0;
|
||||
|
@ -106,8 +106,11 @@ int remove_phb_dynamic(struct pci_controller *phb)
|
||||
release_resource(res);
|
||||
}
|
||||
|
||||
/* Free pci_controller data structure */
|
||||
pcibios_free_controller(phb);
|
||||
/*
|
||||
* The pci_controller data structure is freed by
|
||||
* the pcibios_free_controller_deferred() callback;
|
||||
* see pseries_root_bridge_prepare().
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -534,7 +534,8 @@ struct cpm1_gpio16_chip {
|
||||
|
||||
static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
|
||||
{
|
||||
struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
|
||||
struct cpm1_gpio16_chip *cpm1_gc =
|
||||
container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
|
||||
struct cpm_ioport16 __iomem *iop = mm_gc->regs;
|
||||
|
||||
cpm1_gc->cpdata = in_be16(&iop->dat);
|
||||
@ -649,7 +650,8 @@ struct cpm1_gpio32_chip {
|
||||
|
||||
static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
|
||||
{
|
||||
struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
|
||||
struct cpm1_gpio32_chip *cpm1_gc =
|
||||
container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
|
||||
struct cpm_ioport32b __iomem *iop = mm_gc->regs;
|
||||
|
||||
cpm1_gc->cpdata = in_be32(&iop->dat);
|
||||
|
@ -94,7 +94,8 @@ struct cpm2_gpio32_chip {
|
||||
|
||||
static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
|
||||
{
|
||||
struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
|
||||
struct cpm2_gpio32_chip *cpm2_gc =
|
||||
container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
|
||||
struct cpm2_ioports __iomem *iop = mm_gc->regs;
|
||||
|
||||
cpm2_gc->cpdata = in_be32(&iop->dat);
|
||||
|
@ -23,7 +23,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/extable.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
@ -68,7 +68,6 @@ config DEBUG_RODATA
|
||||
config S390
|
||||
def_bool y
|
||||
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
|
||||
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select ARCH_HAS_ELF_RANDOMIZE
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
|
@ -602,7 +602,6 @@ CONFIG_FAIL_FUTEX=y
|
||||
CONFIG_FAULT_INJECTION_DEBUG_FS=y
|
||||
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_IRQSOFF_TRACER=y
|
||||
CONFIG_PREEMPT_TRACER=y
|
||||
CONFIG_SCHED_TRACER=y
|
||||
|
@ -552,7 +552,6 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
|
||||
CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
|
||||
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
# CONFIG_KPROBE_EVENT is not set
|
||||
CONFIG_TRACE_ENUM_MAP_FILE=y
|
||||
|
@ -549,7 +549,6 @@ CONFIG_TIMER_STATS=y
|
||||
CONFIG_RCU_TORTURE_TEST=m
|
||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_SCHED_TRACER=y
|
||||
CONFIG_FTRACE_SYSCALLS=y
|
||||
CONFIG_STACK_TRACER=y
|
||||
|
@ -172,7 +172,6 @@ CONFIG_DEBUG_NOTIFIERS=y
|
||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||
CONFIG_RCU_TRACE=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_SCHED_TRACER=y
|
||||
CONFIG_FTRACE_SYSCALLS=y
|
||||
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
|
||||
|
@ -311,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
|
||||
#define __put_user_unaligned __put_user
|
||||
#define __get_user_unaligned __get_user
|
||||
|
||||
extern void __compiletime_error("usercopy buffer size is too small")
|
||||
__bad_copy_user(void);
|
||||
|
||||
static inline void copy_user_overflow(int size, unsigned long count)
|
||||
{
|
||||
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
|
||||
}
|
||||
|
||||
/**
|
||||
* copy_to_user: - Copy a block of data into user space.
|
||||
* @to: Destination address, in user space.
|
||||
@ -332,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
|
||||
return __copy_to_user(to, from, n);
|
||||
}
|
||||
|
||||
void copy_from_user_overflow(void)
|
||||
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
__compiletime_warning("copy_from_user() buffer size is not provably correct")
|
||||
#endif
|
||||
;
|
||||
|
||||
/**
|
||||
* copy_from_user: - Copy a block of data from user space.
|
||||
* @to: Destination address, in kernel space.
|
||||
@ -362,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
|
||||
|
||||
might_fault();
|
||||
if (unlikely(sz != -1 && sz < n)) {
|
||||
copy_from_user_overflow();
|
||||
if (!__builtin_constant_p(n))
|
||||
copy_user_overflow(sz, n);
|
||||
else
|
||||
__bad_copy_user();
|
||||
return n;
|
||||
}
|
||||
return __copy_from_user(to, from, n);
|
||||
|
@ -204,11 +204,9 @@ static void __init conmode_default(void)
|
||||
#endif
|
||||
}
|
||||
} else if (MACHINE_IS_KVM) {
|
||||
if (sclp.has_vt220 &&
|
||||
config_enabled(CONFIG_SCLP_VT220_CONSOLE))
|
||||
if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
|
||||
SET_CONSOLE_VT220;
|
||||
else if (sclp.has_linemode &&
|
||||
config_enabled(CONFIG_SCLP_CONSOLE))
|
||||
else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
|
||||
SET_CONSOLE_SCLP;
|
||||
else
|
||||
SET_CONSOLE_HVC;
|
||||
|
@ -4,7 +4,6 @@
|
||||
config TILE
|
||||
def_bool y
|
||||
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
|
||||
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select ARCH_HAVE_NMI_SAFE_CMPXCHG
|
||||
select ARCH_WANT_FRAME_POINTERS
|
||||
|
@ -416,14 +416,13 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
|
||||
return n;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
/*
|
||||
* There are still unprovable places in the generic code as of 2.6.34, so this
|
||||
* option is not really compatible with -Werror, which is more useful in
|
||||
* general.
|
||||
*/
|
||||
extern void copy_from_user_overflow(void)
|
||||
__compiletime_warning("copy_from_user() size is not provably correct");
|
||||
extern void __compiletime_error("usercopy buffer size is too small")
|
||||
__bad_copy_user(void);
|
||||
|
||||
static inline void copy_user_overflow(int size, unsigned long count)
|
||||
{
|
||||
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
|
||||
}
|
||||
|
||||
static inline unsigned long __must_check copy_from_user(void *to,
|
||||
const void __user *from,
|
||||
@ -433,14 +432,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
|
||||
|
||||
if (likely(sz == -1 || sz >= n))
|
||||
n = _copy_from_user(to, from, n);
|
||||
else if (!__builtin_constant_p(n))
|
||||
copy_user_overflow(sz, n);
|
||||
else
|
||||
copy_from_user_overflow();
|
||||
__bad_copy_user();
|
||||
|
||||
return n;
|
||||
}
|
||||
#else
|
||||
#define copy_from_user _copy_from_user
|
||||
#endif
|
||||
|
||||
#ifdef __tilegx__
|
||||
/**
|
||||
|
@ -81,7 +81,7 @@
|
||||
.altinstr_replacement : { *(.altinstr_replacement) }
|
||||
/* .exit.text is discard at runtime, not link time, to deal with references
|
||||
from .altinstructions and .eh_frame */
|
||||
.exit.text : { *(.exit.text) }
|
||||
.exit.text : { EXIT_TEXT }
|
||||
.exit.data : { *(.exit.data) }
|
||||
|
||||
.preinit_array : {
|
||||
|
@ -24,7 +24,6 @@ config X86
|
||||
select ARCH_DISCARD_MEMBLOCK
|
||||
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
|
||||
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
|
||||
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select ARCH_HAS_ELF_RANDOMIZE
|
||||
select ARCH_HAS_FAST_MULTIPLIER
|
||||
|
@ -1 +1,3 @@
|
||||
CONFIG_NOHIGHMEM=y
|
||||
# CONFIG_HIGHMEM4G is not set
|
||||
# CONFIG_HIGHMEM64G is not set
|
||||
|
@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
|
||||
|
||||
req = cast_mcryptd_ctx_to_req(req_ctx);
|
||||
if (irqs_disabled())
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
else {
|
||||
local_bh_disable();
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
local_bh_enable();
|
||||
}
|
||||
}
|
||||
|
@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
|
||||
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
|
||||
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
|
||||
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
|
||||
movl _args_digest+4*32(state, idx, 4), tmp2_w
|
||||
vmovd _args_digest(state , idx, 4) , %xmm0
|
||||
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
|
||||
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
|
||||
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
|
||||
|
||||
vmovdqu %xmm0, _result_digest(job_rax)
|
||||
movl tmp2_w, _result_digest+1*16(job_rax)
|
||||
vmovdqu %xmm0, _result_digest(job_rax)
|
||||
offset = (_result_digest + 1*16)
|
||||
vmovdqu %xmm1, offset(job_rax)
|
||||
|
||||
pop %rbx
|
||||
|
||||
|
@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
|
||||
|
||||
req = cast_mcryptd_ctx_to_req(req_ctx);
|
||||
if (irqs_disabled())
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
else {
|
||||
local_bh_disable();
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
local_bh_enable();
|
||||
}
|
||||
}
|
||||
|
@ -697,43 +697,14 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
@ -743,31 +714,13 @@ copy_from_user(void *to, const void __user *from, unsigned long n)

kasan_check_write(to, n);

/*
* While we would like to have the compiler do the checking for us
* even in the non-constant size case, any false positives there are
* a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
* without - the [hopefully] dangerous looking nature of the warning
* would make people go look at the respecitive call sites over and
* over again just to find that there's no problem).
*
* And there are cases where it's just not realistic for the compiler
* to prove the count to be in range. For example when multiple call
* sites of a helper function - perhaps in different source files -
* all doing proper range checking, yet the helper function not doing
* so again.
*
* Therefore limit the compile time checking to the constant size
* case, and do only runtime checking for non-constant sizes.
*/

if (likely(sz < 0 || sz >= n)) {
check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
} else if (__builtin_constant_p(n))
copy_from_user_overflow();
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__copy_from_user_overflow(sz, n);
__bad_copy_user();

return n;
}
@ -781,21 +734,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)

might_fault();

/* See the comment in copy_from_user() above. */
if (likely(sz < 0 || sz >= n)) {
check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
} else if (__builtin_constant_p(n))
copy_to_user_overflow();
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__copy_to_user_overflow(sz, n);
__bad_copy_user();

return n;
}

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
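Editor's note (not part of the commit): the hunk above drops the old copy_user_diag/CONFIG_DEBUG_STRICT_USER_COPY_CHECKS machinery in favour of a single __bad_copy_user() compile-time error plus a runtime copy_user_overflow() warning, with the compile-time path limited to constant sizes. A minimal user-space sketch of that split follows; demo_copy() and runtime_overflow() are made-up names, and the long `sz` parameter stands in for what __compiletime_object_size() reports in the kernel version.

```c
#include <stdio.h>
#include <string.h>

/* Runtime fallback, analogous in spirit to copy_user_overflow() above. */
static void runtime_overflow(int size, unsigned long count)
{
	fprintf(stderr, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

/*
 * sz plays the role of the destination object size: -1 when unknown,
 * otherwise the known size.  Only a constant, provably too-large n would be
 * reported at build time in the kernel version; non-constant sizes get a
 * runtime diagnostic instead.
 */
static unsigned long demo_copy(void *to, long sz, const void *from, unsigned long n)
{
	if (sz < 0 || (unsigned long)sz >= n)
		memcpy(to, from, n);		/* size fits or is unknown: do the copy */
	else if (!__builtin_constant_p(n))
		runtime_overflow((int)sz, n);	/* non-constant size: runtime warning only */
	/* else: constant overflow, which the kernel diagnoses at compile time */
	return n;
}

int main(void)
{
	char dst[8];
	const char src[16] = "0123456789abcdef";

	demo_copy(dst, (long)sizeof(dst), src, 4);
	printf("%.4s\n", dst);
	return 0;
}
```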
@ -1623,6 +1623,9 @@ void __init enable_IR_x2apic(void)
unsigned long flags;
int ret, ir_stat;

if (skip_ioapic_setup)
return;

ir_stat = irq_remapping_prepare();
if (ir_stat < 0 && !x2apic_supported())
return;
@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG 0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
/*
* Apply erratum 665 fix unconditionally so machines without a BIOS
* fix work.
*/
msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
u64 value;

@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
case 6: init_amd_k7(c); break;
case 0xf: init_amd_k8(c); break;
case 0x10: init_amd_gh(c); break;
case 0x12: init_amd_ln(c); break;
case 0x15: init_amd_bd(c); break;
}
@ -56,12 +56,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
".popsection");

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
u32 notrace _paravirt_ident_32(u32 x)
{
return x;
}

u64 _paravirt_ident_64(u64 x)
u64 notrace _paravirt_ident_64(u64 x)
{
return x;
}
@ -422,6 +422,7 @@ struct nested_vmx {
struct list_head vmcs02_pool;
int vmcs02_num;
u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
/*

@ -435,6 +436,8 @@ struct nested_vmx {
bool pi_pending;
u16 posted_intr_nv;

unsigned long *msr_bitmap;

struct hrtimer preemption_timer;
bool preemption_timer_expired;

@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;

@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
new.control) != old.control);
}

static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.

@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

/* Setup TSC multiplier */
if (kvm_has_tsc_control &&
vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}
vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
decache_tsc_multiplier(vmx);

vmx_vcpu_pi_load(vcpu, cpu);
vmx->host_pkru = read_pkru();

@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
unsigned long *msr_bitmap;

if (is_guest_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_nested;
msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
else if (cpu_has_secondary_exec_ctrls() &&
(vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
if (!vmx_msr_bitmap_longmode_x2apic)
goto out4;

if (nested) {
vmx_msr_bitmap_nested =
(unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_nested)
goto out5;
}

vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
goto out6;

@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)

memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
if (nested)
memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);

if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;

@ -6529,9 +6526,6 @@ static __init int hardware_setup(void)
out7:
free_page((unsigned long)vmx_vmread_bitmap);
out6:
if (nested)
free_page((unsigned long)vmx_msr_bitmap_nested);
out5:
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
out4:
free_page((unsigned long)vmx_msr_bitmap_longmode);

@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
free_page((unsigned long)vmx_vmread_bitmap);
if (nested)
free_page((unsigned long)vmx_msr_bitmap_nested);

free_kvm_area();
}

@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}

if (cpu_has_vmx_msr_bitmap()) {
vmx->nested.msr_bitmap =
(unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx->nested.msr_bitmap)
goto out_msr_bitmap;
}

vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_vmcs12)
return -ENOMEM;
goto out_cached_vmcs12;

if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
if (!shadow_vmcs) {
kfree(vmx->nested.cached_vmcs12);
return -ENOMEM;
}
if (!shadow_vmcs)
goto out_shadow_vmcs;
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */

@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;

out_shadow_vmcs:
kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
free_page((unsigned long)vmx->nested.msr_bitmap);

out_msr_bitmap:
return -ENOMEM;
}

/*
@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->nested.vmxon = false;
free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
if (vmx->nested.msr_bitmap) {
free_page((unsigned long)vmx->nested.msr_bitmap);
vmx->nested.msr_bitmap = NULL;
}
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
kfree(vmx->nested.cached_vmcs12);

@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
u32 sec_exec_control;

/* Postpone execution until vmcs01 is the current VMCS. */
if (is_guest_mode(vcpu)) {
to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
return;
}

/*
* There is not point to enable virtualize x2apic without enable
* apicv
@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
{
int msr;
struct page *page;
unsigned long *msr_bitmap;
unsigned long *msr_bitmap_l1;
unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;

/* This shortcut is ok because we support only x2APIC MSRs so far. */
if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
return false;

@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
WARN_ON(1);
return false;
}
msr_bitmap = (unsigned long *)kmap(page);
if (!msr_bitmap) {
msr_bitmap_l1 = (unsigned long *)kmap(page);
if (!msr_bitmap_l1) {
nested_release_page_clean(page);
WARN_ON(1);
return false;
}

memset(msr_bitmap_l0, 0xff, PAGE_SIZE);

if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12))
for (msr = 0x800; msr <= 0x8ff; msr++)
nested_vmx_disable_intercept_for_msr(
msr_bitmap,
vmx_msr_bitmap_nested,
msr_bitmap_l1, msr_bitmap_l0,
msr, MSR_TYPE_R);
/* TPR is allowed */
nested_vmx_disable_intercept_for_msr(msr_bitmap,
vmx_msr_bitmap_nested,

nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_R | MSR_TYPE_W);

if (nested_cpu_has_vid(vmcs12)) {
/* EOI and self-IPI are allowed */
nested_vmx_disable_intercept_for_msr(
msr_bitmap,
vmx_msr_bitmap_nested,
msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
msr_bitmap,
vmx_msr_bitmap_nested,
msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
} else {
/*
* Enable reading intercept of all the x2apic
* MSRs. We should not rely on vmcs12 to do any
* optimizations here, it may have been modified
* by L1.
*/
for (msr = 0x800; msr <= 0x8ff; msr++)
__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
msr,
MSR_TYPE_R);

__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_W);
__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
kunmap(page);
nested_release_page_clean(page);
@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}

if (cpu_has_vmx_msr_bitmap() &&
exec_control & CPU_BASED_USE_MSR_BITMAPS) {
nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
/* MSR_BITMAP will be set by following vmx_set_efer. */
} else
exec_control & CPU_BASED_USE_MSR_BITMAPS &&
nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
; /* MSR_BITMAP will be set by following vmx_set_efer. */
else
exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;

/*

@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
else
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);

if (enable_vpid) {
/*

@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
else
vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);

if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
vmx_set_virtual_x2apic_mode(vcpu,
vcpu->arch.apic_base & X2APIC_ENABLE);
}

/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;
@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
*/
static inline bool kaslr_memory_enabled(void)
{
return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
* @node: list item for parent traversal.
* @rcu: RCU callback item for freeing.
* @irq: back pointer to parent.
* @enabled: true if driver enabled IRQ
* @virq: the virtual IRQ value provided to the requesting driver.
*
* Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to

@ -50,6 +51,7 @@ struct vmd_irq {
struct list_head node;
struct rcu_head rcu;
struct vmd_irq_list *irq;
bool enabled;
unsigned int virq;
};

@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
unsigned long flags;

raw_spin_lock_irqsave(&list_lock, flags);
WARN_ON(vmdirq->enabled);
list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
vmdirq->enabled = true;
raw_spin_unlock_irqrestore(&list_lock, flags);

data->chip->irq_unmask(data);

@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
data->chip->irq_mask(data);

raw_spin_lock_irqsave(&list_lock, flags);
list_del_rcu(&vmdirq->node);
INIT_LIST_HEAD_RCU(&vmdirq->node);
if (vmdirq->enabled) {
list_del_rcu(&vmdirq->node);
vmdirq->enabled = false;
}
raw_spin_unlock_irqrestore(&list_lock, flags);
}
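Editor's note (not part of the commit): the vmd hunks above add an `enabled` flag so that disable only unlinks an IRQ that was actually added, making the enable/disable pair safe to call in either order. A toy user-space sketch of that guard pattern follows; struct toy_irq, toy_enable() and toy_disable() are made-up names and the real driver does the work under list_lock with RCU list helpers.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy descriptor mirroring the enabled-flag guard added to struct vmd_irq. */
struct toy_irq {
	bool enabled;
	int virq;
};

static void toy_enable(struct toy_irq *d)
{
	if (!d->enabled) {	/* mirrors the intent of WARN_ON(vmdirq->enabled) */
		/* ...link the descriptor into the parent list here... */
		d->enabled = true;
	}
}

static void toy_disable(struct toy_irq *d)
{
	if (d->enabled) {	/* only unlink what was actually linked */
		/* ...list_del_rcu() equivalent here... */
		d->enabled = false;
	}
}

int main(void)
{
	struct toy_irq d = { .enabled = false, .virq = 42 };

	toy_disable(&d);	/* safe even before enable: nothing to unlink */
	toy_enable(&d);
	toy_disable(&d);
	printf("enabled=%d\n", d.enabled);
	return 0;
}
```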
@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
21
block/bio.c
@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

if (bio_op(bio) == REQ_OP_DISCARD)
goto integrity_clone;

if (bio_op(bio) == REQ_OP_WRITE_SAME) {
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
break;
case REQ_OP_WRITE_SAME:
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
goto integrity_clone;
break;
default:
bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
break;
}

bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;

integrity_clone:
if (bio_integrity(bio_src)) {
int ret;

@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
* Discards need a mutable bio_vec to accommodate the payload
* required by the DSM TRIM and UNMAP commands.
*/
if (bio_op(bio) == REQ_OP_DISCARD)
if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
split = bio_clone_bioset(bio, gfp, bs);
else
split = bio_clone_fast(bio, gfp, bs);
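Editor's note (not part of the commit): the bio_clone_bioset() hunk above replaces the if/goto chain with a switch on bio_op(), so REQ_OP_SECURE_ERASE is handled like REQ_OP_DISCARD (no bio_vec payload to clone) while REQ_OP_WRITE_SAME still clones a single vector. A small stand-alone sketch of that dispatch shape follows; the enum toy_op values and segments_to_clone() are made-up names, not the kernel's API.

```c
#include <stdio.h>

/* Hypothetical op codes standing in for the REQ_OP_* values. */
enum toy_op { TOY_READ, TOY_WRITE, TOY_DISCARD, TOY_SECURE_ERASE, TOY_WRITE_SAME };

/*
 * Mirrors the dispatch shape of the patched bio_clone_bioset(): discard-like
 * ops carry no payload to clone, WRITE_SAME clones a single vector, and
 * everything else clones every segment.
 */
static int segments_to_clone(enum toy_op op, int nr_segs)
{
	switch (op) {
	case TOY_DISCARD:
	case TOY_SECURE_ERASE:
		return 0;		/* no data pages to copy over */
	case TOY_WRITE_SAME:
		return 1;		/* the single repeated payload vector */
	default:
		return nr_segs;		/* regular read/write: clone all segments */
	}
}

int main(void)
{
	printf("discard: %d, write_same: %d, write: %d\n",
	       segments_to_clone(TOY_DISCARD, 8),
	       segments_to_clone(TOY_WRITE_SAME, 8),
	       segments_to_clone(TOY_WRITE, 8));
	return 0;
}
```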
Some files were not shown because too many files have changed in this diff.