Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-24 00:50:50 +07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
xdp_umem.c had overlapping changes between the 64-bit math fix for the calculation of npgs and the removal of the zerocopy memory type, which got rid of the chunk_size_nohdr member.

The mlx5 Kconfig conflict is a case where we just take the net-next copy of the Kconfig entry dependency, as it takes on the ESWITCH dependency by one level of indirection, which is what the 'net' conflicting change is trying to ensure.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1806c13dc2
@@ -84,15 +84,20 @@ Get a decent editor and don't leave whitespace at the end of lines.
Coding style is all about readability and maintainability using commonly
available tools.

The limit on the length of lines is 80 columns and this is a strongly
preferred limit.
The preferred limit on the length of a single line is 80 columns.

Statements longer than 80 columns will be broken into sensible chunks, unless
exceeding 80 columns significantly increases readability and does not hide
information. Descendants are always substantially shorter than the parent and
are placed substantially to the right. The same applies to function headers
with a long argument list. However, never break user-visible strings such as
printk messages, because that breaks the ability to grep for them.
Statements longer than 80 columns should be broken into sensible chunks,
unless exceeding 80 columns significantly increases readability and does
not hide information.

Descendants are always substantially shorter than the parent and
are placed substantially to the right. A very commonly used style
is to align descendants to a function open parenthesis.

These same rules are applied to function headers with a long argument list.

However, never break user-visible strings such as printk messages because
that breaks the ability to grep for them.
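For illustration only (added by the editor, not part of this patch): a minimal sketch of the line-breaking style described above. frob_widget(), frob_widget_hardware() and the FROB_* flags are made-up names; struct device and dev_err() are the usual kernel APIs.

        #include <linux/device.h>

        static int frob_widget(struct device *dev)
        {
                int ret;

                /* descendant lines are aligned to the opening parenthesis */
                ret = frob_widget_hardware(dev, FROB_FAST_MODE,
                                           FROB_VERBOSE);
                if (ret)
                        /* the user-visible string stays on one line, even past 80 columns, so it remains greppable */
                        dev_err(dev, "failed to put the widget into fast mode: %d\n", ret);

                return ret;
        }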
3) Placing Braces and Spaces
Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus

# *DOCUMENTATION*
@@ -42,7 +42,7 @@ SECTIONS
        }
        .table : ALIGN(4) {
                _table_start = .;
                LONG(ZIMAGE_MAGIC(2))
                LONG(ZIMAGE_MAGIC(4))
                LONG(ZIMAGE_MAGIC(0x5a534c4b))
                LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
                LONG(ZIMAGE_MAGIC(_kernel_bss_size))
@@ -943,7 +943,7 @@ ethphy0: ethernet-phy@0 {

&cpsw_emac0 {
        phy-handle = <&ethphy0>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
};

&elm {
@@ -504,7 +504,7 @@ ethphy0: ethernet-phy@0 {

&cpsw_emac0 {
        phy-handle = <&ethphy0>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
};

&rtc {
@@ -833,13 +833,13 @@ ethphy1: ethernet-phy@5 {

&cpsw_emac0 {
        phy-handle = <&ethphy0>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <1>;
};

&cpsw_emac1 {
        phy-handle = <&ethphy1>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <2>;
};
@@ -190,13 +190,13 @@ &mac_sw {

&cpsw_port1 {
        phy-handle = <&ethphy0_sw>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        ti,dual-emac-pvid = <1>;
};

&cpsw_port2 {
        phy-handle = <&ethphy1_sw>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        ti,dual-emac-pvid = <2>;
};
@@ -433,13 +433,13 @@ &mac {

&cpsw_emac0 {
        phy-handle = <&phy0>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <1>;
};

&cpsw_emac1 {
        phy-handle = <&phy1>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <2>;
};
@@ -408,13 +408,13 @@ &rtc {

&cpsw_emac0 {
        phy-handle = <&ethphy0>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <1>;
};

&cpsw_emac1 {
        phy-handle = <&ethphy1>;
        phy-mode = "rgmii";
        phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <2>;
};
@@ -75,7 +75,7 @@ a9pll: arm_clk@0 {
timer@20200 {
        compatible = "arm,cortex-a9-global-timer";
        reg = <0x20200 0x100>;
        interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
        interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
        clocks = <&periph_clk>;
};
@@ -83,7 +83,7 @@ twd-timer@20600 {
        compatible = "arm,cortex-a9-twd-timer";
        reg = <0x20600 0x20>;
        interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) |
                                  IRQ_TYPE_LEVEL_HIGH)>;
                                  IRQ_TYPE_EDGE_RISING)>;
        clocks = <&periph_clk>;
};
@@ -91,7 +91,7 @@ twd-watchdog@20620 {
        compatible = "arm,cortex-a9-twd-wdt";
        reg = <0x20620 0x20>;
        interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) |
                                  IRQ_TYPE_LEVEL_HIGH)>;
                                  IRQ_TYPE_EDGE_RISING)>;
        clocks = <&periph_clk>;
};
@@ -24,7 +24,7 @@ chosen {

leds {
        act {
                gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
                gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
        };
};
@@ -693,7 +693,7 @@ mac: ethernet@0 {

davinci_mdio: mdio@800 {
        compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
        clocks = <&alwon_ethernet_clkctrl DM814_ETHERNET_CPGMAC0_CLKCTRL 0>;
        clocks = <&cpsw_125mhz_gclk>;
        clock-names = "fck";
        #address-cells = <1>;
        #size-cells = <0>;
@@ -65,13 +65,6 @@ panel_in_lvds0: endpoint {
        };
};

&clks {
        assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
                          <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
        assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
                                 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
};

&ldb {
        status = "okay";
@@ -65,13 +65,6 @@ panel_in_lvds0: endpoint {
        };
};

&clks {
        assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
                          <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
        assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
                                 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
};

&ldb {
        status = "okay";
@@ -53,17 +53,6 @@ chosen {
        };
};

&clks {
        assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
                          <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
                          <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
                          <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
        assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
                                 <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
                                 <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
                                 <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
};

&ldb {
        fsl,dual-channel;
        status = "okay";
@@ -377,3 +377,18 @@ pci_root: root@0,0 {
                #interrupt-cells = <1>;
        };
};

&clks {
        assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
                          <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
                          <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
                          <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>,
                          <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>,
                          <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>;
        assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
                                 <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
                                 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
                                 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
                                 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
                                 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
};
@@ -98,19 +98,19 @@ &twsi4 {
        status = "okay";
};

&ssp3 {
&ssp1 {
        status = "okay";
        cs-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
        cs-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;

        firmware-flash@0 {
                compatible = "st,m25p80", "jedec,spi-nor";
                compatible = "winbond,w25q32", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <40000000>;
                spi-max-frequency = <104000000>;
                m25p,fast-read;
        };
};

&ssp4 {
        cs-gpios = <&gpio 56 GPIO_ACTIVE_HIGH>;
&ssp2 {
        cs-gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
        status = "okay";
};
@@ -202,8 +202,7 @@ usb_otg0: usb-otg@d4208000 {
};

hsic_phy0: hsic-phy@f0001800 {
        compatible = "marvell,mmp3-hsic-phy",
                     "usb-nop-xceiv";
        compatible = "marvell,mmp3-hsic-phy";
        reg = <0xf0001800 0x40>;
        #phy-cells = <0>;
        status = "disabled";
@@ -224,8 +223,7 @@ hsic0: hsic@f0001000 {
};

hsic_phy1: hsic-phy@f0002800 {
        compatible = "marvell,mmp3-hsic-phy",
                     "usb-nop-xceiv";
        compatible = "marvell,mmp3-hsic-phy";
        reg = <0xf0002800 0x40>;
        #phy-cells = <0>;
        status = "disabled";
@@ -531,7 +529,7 @@ l2: l2-cache-controller@d0020000 {
};

soc_clocks: clocks@d4050000 {
        compatible = "marvell,mmp2-clock";
        compatible = "marvell,mmp3-clock";
        reg = <0xd4050000 0x1000>,
              <0xd4282800 0x400>,
              <0xd4015000 0x1000>;
|
@ -18,11 +18,11 @@
|
||||
#endif
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/domain.h>
|
||||
#include <asm/opcodes-virt.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/uaccess-asm.h>
|
||||
|
||||
#define IOMEM(x) (x)
|
||||
|
||||
@ -446,79 +446,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
|
||||
.size \name , . - \name
|
||||
.endm
|
||||
|
||||
.macro csdb
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
.inst.w 0xf3af8014
|
||||
#else
|
||||
.inst 0xe320f014
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
|
||||
#ifndef CONFIG_CPU_USE_DOMAINS
|
||||
adds \tmp, \addr, #\size - 1
|
||||
sbcscc \tmp, \tmp, \limit
|
||||
bcs \bad
|
||||
#ifdef CONFIG_CPU_SPECTRE
|
||||
movcs \addr, #0
|
||||
csdb
|
||||
#endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
|
||||
#ifdef CONFIG_CPU_SPECTRE
|
||||
sub \tmp, \limit, #1
|
||||
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
|
||||
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
|
||||
subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
|
||||
movlo \addr, #0 @ if (tmp < 0) addr = NULL
|
||||
csdb
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_disable, tmp, isb=1
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
* Whenever we re-enter userspace, the domains should always be
|
||||
* set appropriately.
|
||||
*/
|
||||
mov \tmp, #DACR_UACCESS_DISABLE
|
||||
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
|
||||
.if \isb
|
||||
instr_sync
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_enable, tmp, isb=1
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
* Whenever we re-enter userspace, the domains should always be
|
||||
* set appropriately.
|
||||
*/
|
||||
mov \tmp, #DACR_UACCESS_ENABLE
|
||||
mcr p15, 0, \tmp, c3, c0, 0
|
||||
.if \isb
|
||||
instr_sync
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_save, tmp
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
mrc p15, 0, \tmp, c3, c0, 0
|
||||
str \tmp, [sp, #SVC_DACR]
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_restore
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
ldr r0, [sp, #SVC_DACR]
|
||||
mcr p15, 0, r0, c3, c0, 0
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
|
||||
.macro ret\c, reg
|
||||
#if __LINUX_ARM_ARCH__ < 6
|
||||
|
117
arch/arm/include/asm/uaccess-asm.h
Normal file
117
arch/arm/include/asm/uaccess-asm.h
Normal file
@ -0,0 +1,117 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#ifndef __ASM_UACCESS_ASM_H__
|
||||
#define __ASM_UACCESS_ASM_H__
|
||||
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/domain.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
.macro csdb
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
.inst.w 0xf3af8014
|
||||
#else
|
||||
.inst 0xe320f014
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
|
||||
#ifndef CONFIG_CPU_USE_DOMAINS
|
||||
adds \tmp, \addr, #\size - 1
|
||||
sbcscc \tmp, \tmp, \limit
|
||||
bcs \bad
|
||||
#ifdef CONFIG_CPU_SPECTRE
|
||||
movcs \addr, #0
|
||||
csdb
|
||||
#endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
|
||||
#ifdef CONFIG_CPU_SPECTRE
|
||||
sub \tmp, \limit, #1
|
||||
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
|
||||
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
|
||||
subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
|
||||
movlo \addr, #0 @ if (tmp < 0) addr = NULL
|
||||
csdb
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_disable, tmp, isb=1
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
* Whenever we re-enter userspace, the domains should always be
|
||||
* set appropriately.
|
||||
*/
|
||||
mov \tmp, #DACR_UACCESS_DISABLE
|
||||
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
|
||||
.if \isb
|
||||
instr_sync
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_enable, tmp, isb=1
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
* Whenever we re-enter userspace, the domains should always be
|
||||
* set appropriately.
|
||||
*/
|
||||
mov \tmp, #DACR_UACCESS_ENABLE
|
||||
mcr p15, 0, \tmp, c3, c0, 0
|
||||
.if \isb
|
||||
instr_sync
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
|
||||
#define DACR(x...) x
|
||||
#else
|
||||
#define DACR(x...)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Save the address limit on entry to a privileged exception.
|
||||
*
|
||||
* If we are using the DACR for kernel access by the user accessors
|
||||
* (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
|
||||
* back to client mode, whether or not \disable is set.
|
||||
*
|
||||
* If we are using SW PAN, set the DACR user domain to no access
|
||||
* if \disable is set.
|
||||
*/
|
||||
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
|
||||
ldr \tmp1, [\tsk, #TI_ADDR_LIMIT]
|
||||
mov \tmp2, #TASK_SIZE
|
||||
str \tmp2, [\tsk, #TI_ADDR_LIMIT]
|
||||
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
|
||||
DACR( str \tmp0, [sp, #SVC_DACR])
|
||||
str \tmp1, [sp, #SVC_ADDR_LIMIT]
|
||||
.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
|
||||
/* kernel=client, user=no access */
|
||||
mov \tmp2, #DACR_UACCESS_DISABLE
|
||||
mcr p15, 0, \tmp2, c3, c0, 0
|
||||
instr_sync
|
||||
.elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
|
||||
/* kernel=client */
|
||||
bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
|
||||
orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
|
||||
mcr p15, 0, \tmp2, c3, c0, 0
|
||||
instr_sync
|
||||
.endif
|
||||
.endm
|
||||
|
||||
/* Restore the user access state previously saved by uaccess_entry */
|
||||
.macro uaccess_exit, tsk, tmp0, tmp1
|
||||
ldr \tmp1, [sp, #SVC_ADDR_LIMIT]
|
||||
DACR( ldr \tmp0, [sp, #SVC_DACR])
|
||||
str \tmp1, [\tsk, #TI_ADDR_LIMIT]
|
||||
DACR( mcr p15, 0, \tmp0, c3, c0, 0)
|
||||
.endm
|
||||
|
||||
#undef DACR
|
||||
|
||||
#endif /* __ASM_UACCESS_ASM_H__ */
|
@ -42,7 +42,7 @@ static int __init init_atags_procfs(void)
|
||||
size_t size;
|
||||
|
||||
if (tag->hdr.tag != ATAG_CORE) {
|
||||
pr_info("No ATAGs?");
|
||||
pr_info("No ATAGs?\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <asm/unistd.h>
|
||||
#include <asm/tls.h>
|
||||
#include <asm/system_info.h>
|
||||
#include <asm/uaccess-asm.h>
|
||||
|
||||
#include "entry-header.S"
|
||||
#include <asm/entry-macro-multi.S>
|
||||
@ -179,15 +180,7 @@ ENDPROC(__und_invalid)
|
||||
stmia r7, {r2 - r6}
|
||||
|
||||
get_thread_info tsk
|
||||
ldr r0, [tsk, #TI_ADDR_LIMIT]
|
||||
mov r1, #TASK_SIZE
|
||||
str r1, [tsk, #TI_ADDR_LIMIT]
|
||||
str r0, [sp, #SVC_ADDR_LIMIT]
|
||||
|
||||
uaccess_save r0
|
||||
.if \uaccess
|
||||
uaccess_disable r0
|
||||
.endif
|
||||
uaccess_entry tsk, r0, r1, r2, \uaccess
|
||||
|
||||
.if \trace
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/uaccess-asm.h>
|
||||
#include <asm/v7m.h>
|
||||
|
||||
@ Bad Abort numbers
|
||||
@ -217,9 +218,7 @@
|
||||
blne trace_hardirqs_off
|
||||
#endif
|
||||
.endif
|
||||
ldr r1, [sp, #SVC_ADDR_LIMIT]
|
||||
uaccess_restore
|
||||
str r1, [tsk, #TI_ADDR_LIMIT]
|
||||
uaccess_exit tsk, r0, r1
|
||||
|
||||
#ifndef CONFIG_THUMB2_KERNEL
|
||||
@ ARM mode SVC restore
|
||||
@ -263,9 +262,7 @@
|
||||
@ on the stack remains correct).
|
||||
@
|
||||
.macro svc_exit_via_fiq
|
||||
ldr r1, [sp, #SVC_ADDR_LIMIT]
|
||||
uaccess_restore
|
||||
str r1, [tsk, #TI_ADDR_LIMIT]
|
||||
uaccess_exit tsk, r0, r1
|
||||
#ifndef CONFIG_THUMB2_KERNEL
|
||||
@ ARM mode restore
|
||||
mov r0, sp
|
||||
|
@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = {
|
||||
};
|
||||
|
||||
static struct undef_hook thumb_break_hook = {
|
||||
.instr_mask = 0xffff,
|
||||
.instr_val = 0xde01,
|
||||
.instr_mask = 0xffffffff,
|
||||
.instr_val = 0x0000de01,
|
||||
.cpsr_mask = PSR_T_BIT,
|
||||
.cpsr_val = PSR_T_BIT,
|
||||
.fn = break_trap,
|
||||
|
@ -1402,8 +1402,8 @@ vcodec_enc: vcodec@18002000 {
|
||||
"venc_lt_sel";
|
||||
assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
|
||||
<&topckgen CLK_TOP_VENC_LT_SEL>;
|
||||
assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>,
|
||||
<&topckgen CLK_TOP_UNIVPLL1_D2>;
|
||||
assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>,
|
||||
<&topckgen CLK_TOP_VCODECPLL_370P5>;
|
||||
};
|
||||
|
||||
jpegdec: jpegdec@18004000 {
|
||||
|
@ -176,7 +176,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
||||
panic("CPU%u detected unsupported configuration\n", cpu);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static void init_gic_priority_masking(void)
|
||||
|
@ -80,7 +80,6 @@
|
||||
.endm
|
||||
|
||||
.macro RESTORE_ALL
|
||||
psrclr ie
|
||||
ldw lr, (sp, 4)
|
||||
ldw a0, (sp, 8)
|
||||
mtcr a0, epc
|
||||
@ -175,9 +174,4 @@
|
||||
movi r6, 0
|
||||
cpwcr r6, cpcr31
|
||||
.endm
|
||||
|
||||
.macro ANDI_R3 rx, imm
|
||||
lsri \rx, 3
|
||||
andi \rx, (\imm >> 3)
|
||||
.endm
|
||||
#endif /* __ASM_CSKY_ENTRY_H */
|
||||
|
@ -13,6 +13,8 @@
|
||||
#define LSAVE_A1 28
|
||||
#define LSAVE_A2 32
|
||||
#define LSAVE_A3 36
|
||||
#define LSAVE_A4 40
|
||||
#define LSAVE_A5 44
|
||||
|
||||
#define KSPTOUSP
|
||||
#define USPTOKSP
|
||||
@ -63,7 +65,6 @@
|
||||
.endm
|
||||
|
||||
.macro RESTORE_ALL
|
||||
psrclr ie
|
||||
ldw tls, (sp, 0)
|
||||
ldw lr, (sp, 4)
|
||||
ldw a0, (sp, 8)
|
||||
@ -301,9 +302,4 @@
|
||||
jmpi 3f /* jump to va */
|
||||
3:
|
||||
.endm
|
||||
|
||||
.macro ANDI_R3 rx, imm
|
||||
lsri \rx, 3
|
||||
andi \rx, (\imm >> 3)
|
||||
.endm
|
||||
#endif /* __ASM_CSKY_ENTRY_H */
|
||||
|
@ -81,4 +81,10 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
|
||||
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_UPROBE)
|
||||
|
||||
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
_TIF_SYSCALL_TRACEPOINT)
|
||||
|
||||
#endif /* _ASM_CSKY_THREAD_INFO_H */
|
||||
|
@ -128,39 +128,41 @@ tlbop_end 1
|
||||
ENTRY(csky_systemcall)
|
||||
SAVE_ALL TRAP0_SIZE
|
||||
zero_fp
|
||||
#ifdef CONFIG_RSEQ_DEBUG
|
||||
mov a0, sp
|
||||
jbsr rseq_syscall
|
||||
#endif
|
||||
psrset ee, ie
|
||||
|
||||
lrw r11, __NR_syscalls
|
||||
cmphs syscallid, r11 /* Check nr of syscall */
|
||||
bt ret_from_exception
|
||||
lrw r9, __NR_syscalls
|
||||
cmphs syscallid, r9 /* Check nr of syscall */
|
||||
bt 1f
|
||||
|
||||
lrw r13, sys_call_table
|
||||
ixw r13, syscallid
|
||||
ldw r11, (r13)
|
||||
cmpnei r11, 0
|
||||
lrw r9, sys_call_table
|
||||
ixw r9, syscallid
|
||||
ldw syscallid, (r9)
|
||||
cmpnei syscallid, 0
|
||||
bf ret_from_exception
|
||||
|
||||
mov r9, sp
|
||||
bmaski r10, THREAD_SHIFT
|
||||
andn r9, r10
|
||||
ldw r12, (r9, TINFO_FLAGS)
|
||||
ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
|
||||
cmpnei r12, 0
|
||||
ldw r10, (r9, TINFO_FLAGS)
|
||||
lrw r9, _TIF_SYSCALL_WORK
|
||||
and r10, r9
|
||||
cmpnei r10, 0
|
||||
bt csky_syscall_trace
|
||||
#if defined(__CSKYABIV2__)
|
||||
subi sp, 8
|
||||
stw r5, (sp, 0x4)
|
||||
stw r4, (sp, 0x0)
|
||||
jsr r11 /* Do system call */
|
||||
jsr syscallid /* Do system call */
|
||||
addi sp, 8
|
||||
#else
|
||||
jsr r11
|
||||
jsr syscallid
|
||||
#endif
|
||||
stw a0, (sp, LSAVE_A0) /* Save return value */
|
||||
1:
|
||||
#ifdef CONFIG_DEBUG_RSEQ
|
||||
mov a0, sp
|
||||
jbsr rseq_syscall
|
||||
#endif
|
||||
jmpi ret_from_exception
|
||||
|
||||
csky_syscall_trace:
|
||||
@ -173,18 +175,23 @@ csky_syscall_trace:
|
||||
ldw a3, (sp, LSAVE_A3)
|
||||
#if defined(__CSKYABIV2__)
|
||||
subi sp, 8
|
||||
stw r5, (sp, 0x4)
|
||||
stw r4, (sp, 0x0)
|
||||
ldw r9, (sp, LSAVE_A4)
|
||||
stw r9, (sp, 0x0)
|
||||
ldw r9, (sp, LSAVE_A5)
|
||||
stw r9, (sp, 0x4)
|
||||
jsr syscallid /* Do system call */
|
||||
addi sp, 8
|
||||
#else
|
||||
ldw r6, (sp, LSAVE_A4)
|
||||
ldw r7, (sp, LSAVE_A5)
|
||||
#endif
|
||||
jsr r11 /* Do system call */
|
||||
#if defined(__CSKYABIV2__)
|
||||
addi sp, 8
|
||||
jsr syscallid /* Do system call */
|
||||
#endif
|
||||
stw a0, (sp, LSAVE_A0) /* Save return value */
|
||||
|
||||
#ifdef CONFIG_DEBUG_RSEQ
|
||||
mov a0, sp
|
||||
jbsr rseq_syscall
|
||||
#endif
|
||||
mov a0, sp /* right now, sp --> pt_regs */
|
||||
jbsr syscall_trace_exit
|
||||
br ret_from_exception
|
||||
@ -200,18 +207,20 @@ ENTRY(ret_from_fork)
|
||||
mov r9, sp
|
||||
bmaski r10, THREAD_SHIFT
|
||||
andn r9, r10
|
||||
ldw r12, (r9, TINFO_FLAGS)
|
||||
ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
|
||||
cmpnei r12, 0
|
||||
ldw r10, (r9, TINFO_FLAGS)
|
||||
lrw r9, _TIF_SYSCALL_WORK
|
||||
and r10, r9
|
||||
cmpnei r10, 0
|
||||
bf ret_from_exception
|
||||
mov a0, sp /* sp = pt_regs pointer */
|
||||
jbsr syscall_trace_exit
|
||||
|
||||
ret_from_exception:
|
||||
ld syscallid, (sp, LSAVE_PSR)
|
||||
btsti syscallid, 31
|
||||
bt 1f
|
||||
psrclr ie
|
||||
ld r9, (sp, LSAVE_PSR)
|
||||
btsti r9, 31
|
||||
|
||||
bt 1f
|
||||
/*
|
||||
* Load address of current->thread_info, Then get address of task_struct
|
||||
* Get task_needreshed in task_struct
|
||||
@ -220,11 +229,24 @@ ret_from_exception:
|
||||
bmaski r10, THREAD_SHIFT
|
||||
andn r9, r10
|
||||
|
||||
ldw r12, (r9, TINFO_FLAGS)
|
||||
andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE)
|
||||
cmpnei r12, 0
|
||||
ldw r10, (r9, TINFO_FLAGS)
|
||||
lrw r9, _TIF_WORK_MASK
|
||||
and r10, r9
|
||||
cmpnei r10, 0
|
||||
bt exit_work
|
||||
1:
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
mov r9, sp
|
||||
bmaski r10, THREAD_SHIFT
|
||||
andn r9, r10
|
||||
|
||||
ldw r10, (r9, TINFO_PREEMPT)
|
||||
cmpnei r10, 0
|
||||
bt 2f
|
||||
jbsr preempt_schedule_irq /* irq en/disable is done inside */
|
||||
2:
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
ld r10, (sp, LSAVE_PSR)
|
||||
btsti r10, 6
|
||||
@ -235,14 +257,15 @@ ret_from_exception:
|
||||
RESTORE_ALL
|
||||
|
||||
exit_work:
|
||||
lrw syscallid, ret_from_exception
|
||||
mov lr, syscallid
|
||||
lrw r9, ret_from_exception
|
||||
mov lr, r9
|
||||
|
||||
btsti r12, TIF_NEED_RESCHED
|
||||
btsti r10, TIF_NEED_RESCHED
|
||||
bt work_resched
|
||||
|
||||
psrset ie
|
||||
mov a0, sp
|
||||
mov a1, r12
|
||||
mov a1, r10
|
||||
jmpi do_notify_resume
|
||||
|
||||
work_resched:
|
||||
@ -291,34 +314,10 @@ ENTRY(csky_irq)
|
||||
jbsr trace_hardirqs_off
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
mov r9, sp /* Get current stack pointer */
|
||||
bmaski r10, THREAD_SHIFT
|
||||
andn r9, r10 /* Get thread_info */
|
||||
|
||||
/*
|
||||
* Get task_struct->stack.preempt_count for current,
|
||||
* and increase 1.
|
||||
*/
|
||||
ldw r12, (r9, TINFO_PREEMPT)
|
||||
addi r12, 1
|
||||
stw r12, (r9, TINFO_PREEMPT)
|
||||
#endif
|
||||
|
||||
mov a0, sp
|
||||
jbsr csky_do_IRQ
|
||||
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
subi r12, 1
|
||||
stw r12, (r9, TINFO_PREEMPT)
|
||||
cmpnei r12, 0
|
||||
bt 2f
|
||||
ldw r12, (r9, TINFO_FLAGS)
|
||||
btsti r12, TIF_NEED_RESCHED
|
||||
bf 2f
|
||||
jbsr preempt_schedule_irq /* irq en/disable is done inside */
|
||||
#endif
|
||||
2:
|
||||
jmpi ret_from_exception
|
||||
|
||||
/*
|
||||
|
@ -6,7 +6,7 @@
|
||||
#define _ASM_IA64_DEVICE_H
|
||||
|
||||
struct dev_archdata {
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
void *iommu; /* hook for IOMMU specific extension */
|
||||
#endif
|
||||
};
|
||||
|
@ -562,7 +562,7 @@ void __init mem_init(void)
|
||||
> BITS_PER_LONG);
|
||||
|
||||
high_memory = __va((max_pfn << PAGE_SHIFT));
|
||||
set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
|
||||
set_max_mapnr(max_low_pfn);
|
||||
memblock_free_all();
|
||||
|
||||
#ifdef CONFIG_PA11
|
||||
|
@ -126,6 +126,7 @@ config PPC
|
||||
select ARCH_HAS_MMIOWB if PPC64
|
||||
select ARCH_HAS_PHYS_TO_DMA
|
||||
select ARCH_HAS_PMEM_API
|
||||
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
|
||||
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
|
||||
select ARCH_HAS_PTE_SPECIAL
|
||||
select ARCH_HAS_MEMBARRIER_CALLBACKS
|
||||
|
@ -162,6 +162,9 @@ UBSAN_SANITIZE_kprobes.o := n
|
||||
GCOV_PROFILE_kprobes-ftrace.o := n
|
||||
KCOV_INSTRUMENT_kprobes-ftrace.o := n
|
||||
UBSAN_SANITIZE_kprobes-ftrace.o := n
|
||||
GCOV_PROFILE_syscall_64.o := n
|
||||
KCOV_INSTRUMENT_syscall_64.o := n
|
||||
UBSAN_SANITIZE_syscall_64.o := n
|
||||
UBSAN_SANITIZE_vdso.o := n
|
||||
|
||||
# Necessary for booting with kcov enabled on book3e machines
|
||||
|
@ -2411,6 +2411,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
|
||||
GEN_COMMON facility_unavailable
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl facility_unavailable_exception
|
||||
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM facility_unavailable
|
||||
@ -2440,6 +2441,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
|
||||
GEN_COMMON h_facility_unavailable
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl facility_unavailable_exception
|
||||
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
|
||||
b interrupt_return
|
||||
|
||||
GEN_KVM h_facility_unavailable
|
||||
|
@ -3,7 +3,7 @@
|
||||
#define _ASM_X86_DEVICE_H
|
||||
|
||||
struct dev_archdata {
|
||||
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
void *iommu; /* hook for IOMMU specific extension */
|
||||
#endif
|
||||
};
|
||||
|
@ -74,7 +74,7 @@
|
||||
#define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)
|
||||
|
||||
/* 4GB broken PCI/AGP hardware bus master zone */
|
||||
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
|
||||
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* The maximum address that we can perform a DMA transfer to on this platform */
|
||||
|
@ -17,7 +17,7 @@ struct task_struct;
|
||||
|
||||
#ifdef CONFIG_X86_IOPL_IOPERM
|
||||
void io_bitmap_share(struct task_struct *tsk);
|
||||
void io_bitmap_exit(void);
|
||||
void io_bitmap_exit(struct task_struct *tsk);
|
||||
|
||||
void native_tss_update_io_bitmap(void);
|
||||
|
||||
@ -29,7 +29,7 @@ void native_tss_update_io_bitmap(void);
|
||||
|
||||
#else
|
||||
static inline void io_bitmap_share(struct task_struct *tsk) { }
|
||||
static inline void io_bitmap_exit(void) { }
|
||||
static inline void io_bitmap_exit(struct task_struct *tsk) { }
|
||||
static inline void tss_update_io_bitmap(void) { }
|
||||
#endif
|
||||
|
||||
|
@ -2,8 +2,15 @@
|
||||
#ifndef _UAPI_ASM_X86_UNISTD_H
|
||||
#define _UAPI_ASM_X86_UNISTD_H
|
||||
|
||||
/* x32 syscall flag bit */
|
||||
#define __X32_SYSCALL_BIT 0x40000000UL
|
||||
/*
|
||||
* x32 syscall flag bit. Some user programs expect syscall NR macros
|
||||
* and __X32_SYSCALL_BIT to have type int, even though syscall numbers
|
||||
* are, for practical purposes, unsigned long.
|
||||
*
|
||||
* Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right
|
||||
* thing regardless.
|
||||
*/
|
||||
#define __X32_SYSCALL_BIT 0x40000000
|
||||
|
||||
#ifndef __KERNEL__
|
||||
# ifdef __i386__
|
||||
|
@ -957,18 +957,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is similar to user_regset_copyout(), but will not add offset to
|
||||
* the source data pointer or increment pos, count, kbuf, and ubuf.
|
||||
*/
|
||||
static inline void
|
||||
__copy_xstate_to_kernel(void *kbuf, const void *data,
|
||||
unsigned int offset, unsigned int size, unsigned int size_total)
|
||||
static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
|
||||
{
|
||||
if (offset < size_total) {
|
||||
unsigned int copy = min(size, size_total - offset);
|
||||
if (*pos < to) {
|
||||
unsigned size = to - *pos;
|
||||
|
||||
memcpy(kbuf + offset, data, copy);
|
||||
if (size > *count)
|
||||
size = *count;
|
||||
memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
|
||||
*kbuf += size;
|
||||
*pos += size;
|
||||
*count -= size;
|
||||
}
|
||||
}
|
||||
|
||||
static void copy_part(unsigned offset, unsigned size, void *from,
|
||||
void **kbuf, unsigned *pos, unsigned *count)
|
||||
{
|
||||
fill_gap(offset, kbuf, pos, count);
|
||||
if (size > *count)
|
||||
size = *count;
|
||||
if (size) {
|
||||
memcpy(*kbuf, from, size);
|
||||
*kbuf += size;
|
||||
*pos += size;
|
||||
*count -= size;
|
||||
}
|
||||
}
|
||||
|
||||
@ -981,8 +994,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data,
|
||||
*/
|
||||
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
|
||||
{
|
||||
unsigned int offset, size;
|
||||
struct xstate_header header;
|
||||
const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
|
||||
unsigned count = size_total;
|
||||
int i;
|
||||
|
||||
/*
|
||||
@ -998,46 +1012,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
|
||||
header.xfeatures = xsave->header.xfeatures;
|
||||
header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
|
||||
|
||||
if (header.xfeatures & XFEATURE_MASK_FP)
|
||||
copy_part(0, off_mxcsr,
|
||||
&xsave->i387, &kbuf, &offset_start, &count);
|
||||
if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
|
||||
copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
|
||||
&xsave->i387.mxcsr, &kbuf, &offset_start, &count);
|
||||
if (header.xfeatures & XFEATURE_MASK_FP)
|
||||
copy_part(offsetof(struct fxregs_state, st_space), 128,
|
||||
&xsave->i387.st_space, &kbuf, &offset_start, &count);
|
||||
if (header.xfeatures & XFEATURE_MASK_SSE)
|
||||
copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
|
||||
&xsave->i387.xmm_space, &kbuf, &offset_start, &count);
|
||||
/*
|
||||
* Fill xsave->i387.sw_reserved value for ptrace frame:
|
||||
*/
|
||||
copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
|
||||
xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
|
||||
/*
|
||||
* Copy xregs_state->header:
|
||||
*/
|
||||
offset = offsetof(struct xregs_state, header);
|
||||
size = sizeof(header);
|
||||
copy_part(offsetof(struct xregs_state, header), sizeof(header),
|
||||
&header, &kbuf, &offset_start, &count);
|
||||
|
||||
__copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
|
||||
|
||||
for (i = 0; i < XFEATURE_MAX; i++) {
|
||||
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
|
||||
/*
|
||||
* Copy only in-use xstates:
|
||||
*/
|
||||
if ((header.xfeatures >> i) & 1) {
|
||||
void *src = __raw_xsave_addr(xsave, i);
|
||||
|
||||
offset = xstate_offsets[i];
|
||||
size = xstate_sizes[i];
|
||||
|
||||
/* The next component has to fit fully into the output buffer: */
|
||||
if (offset + size > size_total)
|
||||
break;
|
||||
|
||||
__copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
|
||||
copy_part(xstate_offsets[i], xstate_sizes[i],
|
||||
src, &kbuf, &offset_start, &count);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (xfeatures_mxcsr_quirk(header.xfeatures)) {
|
||||
offset = offsetof(struct fxregs_state, mxcsr);
|
||||
size = MXCSR_AND_FLAGS_SIZE;
|
||||
__copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill xsave->i387.sw_reserved value for ptrace frame:
|
||||
*/
|
||||
offset = offsetof(struct fxregs_state, sw_reserved);
|
||||
size = sizeof(xstate_fx_sw_bytes);
|
||||
|
||||
__copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
|
||||
fill_gap(size_total, &kbuf, &offset_start, &count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -33,15 +33,15 @@ void io_bitmap_share(struct task_struct *tsk)
|
||||
set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
|
||||
}
|
||||
|
||||
static void task_update_io_bitmap(void)
|
||||
static void task_update_io_bitmap(struct task_struct *tsk)
|
||||
{
|
||||
struct thread_struct *t = &current->thread;
|
||||
struct thread_struct *t = &tsk->thread;
|
||||
|
||||
if (t->iopl_emul == 3 || t->io_bitmap) {
|
||||
/* TSS update is handled on exit to user space */
|
||||
set_thread_flag(TIF_IO_BITMAP);
|
||||
set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
|
||||
} else {
|
||||
clear_thread_flag(TIF_IO_BITMAP);
|
||||
clear_tsk_thread_flag(tsk, TIF_IO_BITMAP);
|
||||
/* Invalidate TSS */
|
||||
preempt_disable();
|
||||
tss_update_io_bitmap();
|
||||
@ -49,12 +49,12 @@ static void task_update_io_bitmap(void)
|
||||
}
|
||||
}
|
||||
|
||||
void io_bitmap_exit(void)
|
||||
void io_bitmap_exit(struct task_struct *tsk)
|
||||
{
|
||||
struct io_bitmap *iobm = current->thread.io_bitmap;
|
||||
struct io_bitmap *iobm = tsk->thread.io_bitmap;
|
||||
|
||||
current->thread.io_bitmap = NULL;
|
||||
task_update_io_bitmap();
|
||||
tsk->thread.io_bitmap = NULL;
|
||||
task_update_io_bitmap(tsk);
|
||||
if (iobm && refcount_dec_and_test(&iobm->refcnt))
|
||||
kfree(iobm);
|
||||
}
|
||||
@ -102,7 +102,7 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
|
||||
if (!iobm)
|
||||
return -ENOMEM;
|
||||
refcount_set(&iobm->refcnt, 1);
|
||||
io_bitmap_exit();
|
||||
io_bitmap_exit(current);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -134,7 +134,7 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
|
||||
}
|
||||
/* All permissions dropped? */
|
||||
if (max_long == UINT_MAX) {
|
||||
io_bitmap_exit();
|
||||
io_bitmap_exit(current);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -192,7 +192,7 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
|
||||
}
|
||||
|
||||
t->iopl_emul = level;
|
||||
task_update_io_bitmap();
|
||||
task_update_io_bitmap(current);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -96,7 +96,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
||||
}
|
||||
|
||||
/*
|
||||
* Free current thread data structures etc..
|
||||
* Free thread data structures etc..
|
||||
*/
|
||||
void exit_thread(struct task_struct *tsk)
|
||||
{
|
||||
@ -104,7 +104,7 @@ void exit_thread(struct task_struct *tsk)
|
||||
struct fpu *fpu = &t->fpu;
|
||||
|
||||
if (test_thread_flag(TIF_IO_BITMAP))
|
||||
io_bitmap_exit();
|
||||
io_bitmap_exit(tsk);
|
||||
|
||||
free_vm86(t);
|
||||
|
||||
|
@ -891,14 +891,11 @@ generic_make_request_checks(struct bio *bio)
|
||||
}
|
||||
|
||||
/*
|
||||
* Non-mq queues do not honor REQ_NOWAIT, so complete a bio
|
||||
* with BLK_STS_AGAIN status in order to catch -EAGAIN and
|
||||
* to give a chance to the caller to repeat request gracefully.
|
||||
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
|
||||
* if queue is not a request based queue.
|
||||
*/
|
||||
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
|
||||
status = BLK_STS_AGAIN;
|
||||
goto end_io;
|
||||
}
|
||||
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
|
||||
goto not_supported;
|
||||
|
||||
if (should_fail_bio(bio))
|
||||
goto end_io;
|
||||
|
@ -377,6 +377,7 @@ config SM_GCC_8150
|
||||
|
||||
config SM_GCC_8250
|
||||
tristate "SM8250 Global Clock Controller"
|
||||
select QCOM_GDSC
|
||||
help
|
||||
Support for the global clock controller on SM8250 devices.
|
||||
Say Y if you want to use peripheral devices such as UART,
|
||||
|
@ -76,8 +76,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
|
||||
.clkr.hw.init = &(struct clk_init_data){
|
||||
.name = "gpll0_out_even",
|
||||
.parent_data = &(const struct clk_parent_data){
|
||||
.fw_name = "bi_tcxo",
|
||||
.name = "bi_tcxo",
|
||||
.hw = &gpll0.clkr.hw,
|
||||
},
|
||||
.num_parents = 1,
|
||||
.ops = &clk_trion_pll_postdiv_ops,
|
||||
|
@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
|
||||
make_tx_data_wr(sk, skb, immdlen, len,
|
||||
credits_needed, completion);
|
||||
tp->snd_nxt += len;
|
||||
tp->lsndtime = tcp_time_stamp(tp);
|
||||
tp->lsndtime = tcp_jiffies32;
|
||||
if (completion)
|
||||
ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
|
||||
} else {
|
||||
|
@ -625,7 +625,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
|
||||
|
||||
kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(kona_gpio->reg_base)) {
|
||||
ret = -ENXIO;
|
||||
ret = PTR_ERR(kona_gpio->reg_base);
|
||||
goto err_irq_domain;
|
||||
}
|
||||
|
||||
|
@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev)
|
||||
mutex_init(&exar_gpio->lock);
|
||||
|
||||
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
|
||||
if (index < 0)
|
||||
goto err_destroy;
|
||||
if (index < 0) {
|
||||
ret = index;
|
||||
goto err_mutex_destroy;
|
||||
}
|
||||
|
||||
sprintf(exar_gpio->name, "exar_gpio%d", index);
|
||||
exar_gpio->gpio_chip.label = exar_gpio->name;
|
||||
@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
|
||||
|
||||
err_destroy:
|
||||
ida_simple_remove(&ida_index, index);
|
||||
err_mutex_destroy:
|
||||
mutex_destroy(&exar_gpio->lock);
|
||||
return ret;
|
||||
}
|
||||
|
@ -127,8 +127,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
|
||||
{
|
||||
u32 arm_gpio_lock_val;
|
||||
|
||||
spin_lock(&gs->gc.bgpio_lock);
|
||||
mutex_lock(yu_arm_gpio_lock_param.lock);
|
||||
spin_lock(&gs->gc.bgpio_lock);
|
||||
|
||||
arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
|
||||
|
||||
@ -136,8 +136,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
|
||||
* When lock active bit[31] is set, ModeX is write enabled
|
||||
*/
|
||||
if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
|
||||
mutex_unlock(yu_arm_gpio_lock_param.lock);
|
||||
spin_unlock(&gs->gc.bgpio_lock);
|
||||
mutex_unlock(yu_arm_gpio_lock_param.lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -152,8 +152,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
|
||||
static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
|
||||
{
|
||||
writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
|
||||
mutex_unlock(yu_arm_gpio_lock_param.lock);
|
||||
spin_unlock(&gs->gc.bgpio_lock);
|
||||
mutex_unlock(yu_arm_gpio_lock_param.lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -782,6 +782,15 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
|
||||
"marvell,armada-370-gpio"))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* There are only two sets of PWM configuration registers for
|
||||
* all the GPIO lines on those SoCs which this driver reserves
|
||||
* for the first two GPIO chips. So if the resource is missing
|
||||
* we can't treat it as an error.
|
||||
*/
|
||||
if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
|
||||
return 0;
|
||||
|
||||
if (IS_ERR(mvchip->clk))
|
||||
return PTR_ERR(mvchip->clk);
|
||||
|
||||
@ -804,12 +813,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
|
||||
mvchip->mvpwm = mvpwm;
|
||||
mvpwm->mvchip = mvchip;
|
||||
|
||||
/*
|
||||
* There are only two sets of PWM configuration registers for
|
||||
* all the GPIO lines on those SoCs which this driver reserves
|
||||
* for the first two GPIO chips. So if the resource is missing
|
||||
* we can't treat it as an error.
|
||||
*/
|
||||
mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
|
||||
if (IS_ERR(mvpwm->membase))
|
||||
return PTR_ERR(mvpwm->membase);
|
||||
|
@ -660,8 +660,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
|
||||
pchip->irq1 = irq1;
|
||||
|
||||
gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (!gpio_reg_base)
|
||||
return -EINVAL;
|
||||
if (IS_ERR(gpio_reg_base))
|
||||
return PTR_ERR(gpio_reg_base);
|
||||
|
||||
clk = clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(clk)) {
|
||||
|
@ -729,6 +729,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
|
||||
if (ret)
|
||||
goto out_free_descs;
|
||||
}
|
||||
|
||||
atomic_notifier_call_chain(&desc->gdev->notifier,
|
||||
GPIOLINE_CHANGED_REQUESTED, desc);
|
||||
|
||||
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
|
||||
offset);
|
||||
}
|
||||
@ -1083,6 +1087,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
|
||||
if (ret)
|
||||
goto out_free_desc;
|
||||
|
||||
atomic_notifier_call_chain(&desc->gdev->notifier,
|
||||
GPIOLINE_CHANGED_REQUESTED, desc);
|
||||
|
||||
le->irq = gpiod_to_irq(desc);
|
||||
if (le->irq <= 0) {
|
||||
ret = -ENODEV;
|
||||
@ -2998,8 +3005,6 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
|
||||
}
|
||||
done:
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
atomic_notifier_call_chain(&desc->gdev->notifier,
|
||||
GPIOLINE_CHANGED_REQUESTED, desc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -4215,7 +4220,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
|
||||
}
|
||||
}
|
||||
|
||||
if (test_bit(FLAG_IS_OUT, &desc->flags)) {
|
||||
/* To be valid for IRQ the line needs to be input or open drain */
|
||||
if (test_bit(FLAG_IS_OUT, &desc->flags) &&
|
||||
!test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
|
||||
chip_err(gc,
|
||||
"%s: tried to flag a GPIO set as output for IRQ\n",
|
||||
__func__);
|
||||
@ -4278,7 +4285,12 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
|
||||
|
||||
if (!IS_ERR(desc) &&
|
||||
!WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
|
||||
WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags));
|
||||
/*
|
||||
* We must not be output when using IRQ UNLESS we are
|
||||
* open drain.
|
||||
*/
|
||||
WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
|
||||
!test_bit(FLAG_OPEN_DRAIN, &desc->flags));
|
||||
set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
|
||||
}
|
||||
}
|
||||
@ -4961,6 +4973,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
atomic_notifier_call_chain(&desc->gdev->notifier,
|
||||
GPIOLINE_CHANGED_REQUESTED, desc);
|
||||
|
||||
return desc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gpiod_get_index);
|
||||
@ -5026,6 +5041,9 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
atomic_notifier_call_chain(&desc->gdev->notifier,
|
||||
GPIOLINE_CHANGED_REQUESTED, desc);
|
||||
|
||||
return desc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
|
||||
|
@ -1050,7 +1050,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev);
|
||||
/* Check with device cgroup if @kfd device is accessible */
|
||||
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
|
||||
{
|
||||
#if defined(CONFIG_CGROUP_DEVICE)
|
||||
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
|
||||
struct drm_device *ddev = kfd->ddev;
|
||||
|
||||
return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
|
||||
|
@ -7880,13 +7880,6 @@ static int dm_update_plane_state(struct dc *dc,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
|
||||
new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
|
||||
DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
|
||||
new_plane_state->crtc_x, new_plane_state->crtc_y);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1684,6 +1684,8 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
|
||||
return;
|
||||
|
||||
/* Stall out until the cursor update completes. */
|
||||
if (vupdate_end < vupdate_start)
|
||||
vupdate_end += stream->timing.v_total;
|
||||
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
|
||||
udelay(us_to_vupdate + us_vupdate);
|
||||
}
|
||||
|
@ -328,8 +328,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
|
||||
if (!drm_atomic_crtc_needs_modeset(state))
|
||||
return 0;
|
||||
|
||||
if (state->mode.hdisplay > priv->soc_info->max_height ||
|
||||
state->mode.vdisplay > priv->soc_info->max_width)
|
||||
if (state->mode.hdisplay > priv->soc_info->max_width ||
|
||||
state->mode.vdisplay > priv->soc_info->max_height)
|
||||
return -EINVAL;
|
||||
|
||||
rate = clk_round_rate(priv->pix_clk,
|
||||
@ -474,7 +474,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
|
||||
|
||||
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct ingenic_drm *priv = arg;
|
||||
struct ingenic_drm *priv = drm_device_get_priv(arg);
|
||||
unsigned int state;
|
||||
|
||||
regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
|
||||
|
@ -153,9 +153,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
|
||||
uobj->context = NULL;
|
||||
|
||||
/*
|
||||
* For DESTROY the usecnt is held write locked, the caller is expected
|
||||
* to put it unlock and put the object when done with it. Only DESTROY
|
||||
* can remove the IDR handle.
|
||||
* For DESTROY the usecnt is not changed, the caller is expected to
|
||||
* manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
|
||||
* handle.
|
||||
*/
|
||||
if (reason != RDMA_REMOVE_DESTROY)
|
||||
atomic_set(&uobj->usecnt, 0);
|
||||
@ -187,7 +187,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
|
||||
/*
|
||||
* This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
|
||||
* sequence. It should only be used from command callbacks. On success the
|
||||
* caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
|
||||
* caller must pair this with uobj_put_destroy(). This
|
||||
* version requires the caller to have already obtained an
|
||||
* LOOKUP_DESTROY uobject kref.
|
||||
*/
|
||||
@ -198,6 +198,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
|
||||
|
||||
down_read(&ufile->hw_destroy_rwsem);
|
||||
|
||||
/*
|
||||
* Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
|
||||
* write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
|
||||
* This is because any other concurrent thread can still see the object
|
||||
* in the xarray due to RCU. Leaving it locked ensures nothing else will
|
||||
* touch it.
|
||||
*/
|
||||
ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
@ -216,7 +223,7 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
|
||||
/*
|
||||
* uobj_get_destroy destroys the HW object and returns a handle to the uobj
|
||||
* with a NULL object pointer. The caller must pair this with
|
||||
* uverbs_put_destroy.
|
||||
* uobj_put_destroy().
|
||||
*/
|
||||
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
|
||||
u32 id, struct uverbs_attr_bundle *attrs)
|
||||
@ -250,8 +257,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
|
||||
uobj = __uobj_get_destroy(obj, id, attrs);
|
||||
if (IS_ERR(uobj))
|
||||
return PTR_ERR(uobj);
|
||||
|
||||
rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
|
||||
uobj_put_destroy(uobj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1439,6 +1439,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
|
||||
if (is_odp_mr(mr)) {
|
||||
to_ib_umem_odp(mr->umem)->private = mr;
|
||||
init_waitqueue_head(&mr->q_deferred_work);
|
||||
atomic_set(&mr->num_deferred_work, 0);
|
||||
err = xa_err(xa_store(&dev->odp_mkeys,
|
||||
mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
|
||||
|
@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
|
||||
qib_dev_err(dd,
|
||||
"Skipping linkcontrol sysfs info, (err %d) port %u\n",
|
||||
ret, port_num);
|
||||
goto bail;
|
||||
goto bail_link;
|
||||
}
|
||||
kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
|
||||
|
||||
@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
|
||||
qib_dev_err(dd,
|
||||
"Skipping sl2vl sysfs info, (err %d) port %u\n",
|
||||
ret, port_num);
|
||||
goto bail_link;
|
||||
goto bail_sl;
|
||||
}
|
||||
kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
|
||||
|
||||
@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
|
||||
qib_dev_err(dd,
|
||||
"Skipping diag_counters sysfs info, (err %d) port %u\n",
|
||||
ret, port_num);
|
||||
goto bail_sl;
|
||||
goto bail_diagc;
|
||||
}
|
||||
kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
|
||||
|
||||
@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
|
||||
qib_dev_err(dd,
|
||||
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
|
||||
ret, port_num);
|
||||
goto bail_diagc;
|
||||
goto bail_cc;
|
||||
}
|
||||
|
||||
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
|
||||
@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
|
||||
&cc_table_bin_attr);
|
||||
kobject_put(&ppd->pport_cc_kobj);
|
||||
}
|
||||
kobject_put(&ppd->diagc_kobj);
|
||||
kobject_put(&ppd->sl2vl_kobj);
|
||||
kobject_put(&ppd->pport_kobj);
|
||||
}
|
||||
|
@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
|
||||
!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
|
||||
dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_free_device;
|
||||
goto err_disable_pdev;
|
||||
}
|
||||
|
||||
ret = pci_request_regions(pdev, DRV_NAME);
|
||||
|
@ -377,8 +377,12 @@ struct ipoib_dev_priv {
|
||||
struct ipoib_rx_buf *rx_ring;
|
||||
|
||||
struct ipoib_tx_buf *tx_ring;
|
||||
/* cyclic ring variables for managing tx_ring, for UD only */
|
||||
unsigned int tx_head;
|
||||
unsigned int tx_tail;
|
||||
/* cyclic ring variables for counting overall outstanding send WRs */
|
||||
unsigned int global_tx_head;
|
||||
unsigned int global_tx_tail;
|
||||
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
|
||||
struct ib_ud_wr tx_wr;
|
||||
struct ib_wc send_wc[MAX_SEND_CQE];
|
||||
|
@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
|
||||
return;
|
||||
}
|
||||
|
||||
if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
|
||||
if ((priv->global_tx_head - priv->global_tx_tail) ==
|
||||
ipoib_sendq_size - 1) {
|
||||
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
|
||||
tx->qp->qp_num);
|
||||
netif_stop_queue(dev);
|
||||
@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
|
||||
} else {
|
||||
netif_trans_update(dev);
|
||||
++tx->tx_head;
|
||||
++priv->tx_head;
|
||||
++priv->global_tx_head;
|
||||
}
|
||||
}
|
||||
|
||||
@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
|
||||
netif_tx_lock(dev);
|
||||
|
||||
++tx->tx_tail;
|
||||
++priv->tx_tail;
|
||||
++priv->global_tx_tail;
|
||||
|
||||
if (unlikely(netif_queue_stopped(dev) &&
|
||||
(priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
|
||||
((priv->global_tx_head - priv->global_tx_tail) <=
|
||||
ipoib_sendq_size >> 1) &&
|
||||
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
|
||||
netif_wake_queue(dev);
|
||||
|
||||
@ -1232,8 +1234,9 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
|
||||
dev_kfree_skb_any(tx_req->skb);
|
||||
netif_tx_lock_bh(p->dev);
|
||||
++p->tx_tail;
|
||||
++priv->tx_tail;
|
||||
if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
|
||||
++priv->global_tx_tail;
|
||||
if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
|
||||
ipoib_sendq_size >> 1) &&
|
||||
netif_queue_stopped(p->dev) &&
|
||||
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
|
||||
netif_wake_queue(p->dev);
|
||||
|
@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
|
||||
dev_kfree_skb_any(tx_req->skb);
|
||||
|
||||
++priv->tx_tail;
|
||||
++priv->global_tx_tail;
|
||||
|
||||
if (unlikely(netif_queue_stopped(dev) &&
|
||||
((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
|
||||
((priv->global_tx_head - priv->global_tx_tail) <=
|
||||
ipoib_sendq_size >> 1) &&
|
||||
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
|
||||
netif_wake_queue(dev);
|
||||
|
||||
@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
|
||||
else
|
||||
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
|
||||
/* increase the tx_head after send success, but use it for queue state */
|
||||
if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
|
||||
if ((priv->global_tx_head - priv->global_tx_tail) ==
|
||||
ipoib_sendq_size - 1) {
|
||||
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
|
||||
netif_stop_queue(dev);
|
||||
}
|
||||
@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
|
||||
|
||||
rc = priv->tx_head;
|
||||
++priv->tx_head;
|
||||
++priv->global_tx_head;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
|
||||
ipoib_dma_unmap_tx(priv, tx_req);
|
||||
dev_kfree_skb_any(tx_req->skb);
|
||||
++priv->tx_tail;
|
||||
++priv->global_tx_tail;
|
||||
}
|
||||
|
||||
for (i = 0; i < ipoib_recvq_size; ++i) {
|
||||
|
@ -1184,9 +1184,11 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
|
||||
|
||||
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
|
||||
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
|
||||
ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
|
||||
netif_queue_stopped(dev),
|
||||
priv->tx_head, priv->tx_tail);
|
||||
ipoib_warn(priv,
|
||||
"queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
|
||||
netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
|
||||
priv->global_tx_head, priv->global_tx_tail);
|
||||
|
||||
/* XXX reset QP, etc. */
|
||||
}
|
||||
|
||||
@ -1701,7 +1703,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
|
||||
goto out_rx_ring_cleanup;
|
||||
}
|
||||
|
||||
/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
|
||||
/* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
|
||||
|
||||
if (ipoib_transport_dev_init(dev, priv->ca)) {
|
||||
pr_warn("%s: ipoib_transport_dev_init failed\n",
|
||||
|
@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on)
    return fasync_helper(fd, file, on, &client->fasync);
}

static int evdev_flush(struct file *file, fl_owner_t id)
{
    struct evdev_client *client = file->private_data;
    struct evdev *evdev = client->evdev;

    mutex_lock(&evdev->mutex);

    if (evdev->exist && !client->revoked)
        input_flush_device(&evdev->handle, file);

    mutex_unlock(&evdev->mutex);
    return 0;
}

static void evdev_free(struct device *dev)
{
    struct evdev *evdev = container_of(dev, struct evdev, dev);
@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file)
    unsigned int i;

    mutex_lock(&evdev->mutex);

    if (evdev->exist && !client->revoked)
        input_flush_device(&evdev->handle, file);

    evdev_ungrab(evdev, client);
    mutex_unlock(&evdev->mutex);

@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = {
    .compat_ioctl = evdev_ioctl_compat,
#endif
    .fasync = evdev_fasync,
    .flush = evdev_flush,
    .llseek = no_llseek,
};

@ -458,6 +458,16 @@ static const u8 xboxone_fw2015_init[] = {
    0x05, 0x20, 0x00, 0x01, 0x00
};

/*
 * This packet is required for Xbox One S (0x045e:0x02ea)
 * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to
 * initialize the controller that was previously used in
 * Bluetooth mode.
 */
static const u8 xboxone_s_init[] = {
    0x05, 0x20, 0x00, 0x0f, 0x06
};

/*
 * This packet is required for the Titanfall 2 Xbox One pads
 * (0x0e6f:0x0165) to finish initialization and for Hori pads
@ -516,6 +526,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
    XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
    XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
    XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
    XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
    XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
    XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
    XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
    XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),

@ -186,7 +186,7 @@ struct touchpad_protocol {
    u8 number_of_fingers;
    u8 clicked2;
    u8 unknown3[16];
    struct tp_finger fingers[0];
    struct tp_finger fingers[];
};

/**

@ -347,18 +347,14 @@ static int cros_ec_keyb_info(struct cros_ec_device *ec_dev,
    params->info_type = info_type;
    params->event_type = event_type;

    ret = cros_ec_cmd_xfer(ec_dev, msg);
    if (ret < 0) {
        dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
                 (int)info_type, (int)event_type, ret);
    } else if (msg->result == EC_RES_INVALID_VERSION) {
    ret = cros_ec_cmd_xfer_status(ec_dev, msg);
    if (ret == -ENOTSUPP) {
        /* With older ECs we just return 0 for everything */
        memset(result, 0, result_size);
        ret = 0;
    } else if (msg->result != EC_RES_SUCCESS) {
        dev_warn(ec_dev->dev, "Error getting info %d/%d: %d\n",
                 (int)info_type, (int)event_type, msg->result);
        ret = -EPROTO;
    } else if (ret < 0) {
        dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
                 (int)info_type, (int)event_type, ret);
    } else if (ret != result_size) {
        dev_warn(ec_dev->dev, "Wrong size %d/%d: %d != %zu\n",
                 (int)info_type, (int)event_type,

@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match);

static struct i2c_driver dir685_tk_i2c_driver = {
    .driver = {
        .name = "dlin-dir685-touchkeys",
        .name = "dlink-dir685-touchkeys",
        .of_match_table = of_match_ptr(dir685_tk_of_match),
    },
    .probe = dir685_tk_probe,

@ -205,8 +205,11 @@ ATTRIBUTE_GROUPS(axp20x);

static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
{
    struct input_dev *idev = pwr;
    struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
    struct axp20x_pek *axp20x_pek = pwr;
    struct input_dev *idev = axp20x_pek->input;

    if (!idev)
        return IRQ_HANDLED;

    /*
     * The power-button is connected to ground so a falling edge (dbf)
@ -225,22 +228,9 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
                                         struct platform_device *pdev)
{
    struct axp20x_dev *axp20x = axp20x_pek->axp20x;
    struct input_dev *idev;
    int error;

    axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
    if (axp20x_pek->irq_dbr < 0)
        return axp20x_pek->irq_dbr;
    axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
                                              axp20x_pek->irq_dbr);

    axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
    if (axp20x_pek->irq_dbf < 0)
        return axp20x_pek->irq_dbf;
    axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
                                              axp20x_pek->irq_dbf);

    axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
    if (!axp20x_pek->input)
        return -ENOMEM;
@ -255,24 +245,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,

    input_set_drvdata(idev, axp20x_pek);

    error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
                                         axp20x_pek_irq, 0,
                                         "axp20x-pek-dbr", idev);
    if (error < 0) {
        dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
                axp20x_pek->irq_dbr, error);
        return error;
    }

    error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
                                         axp20x_pek_irq, 0,
                                         "axp20x-pek-dbf", idev);
    if (error < 0) {
        dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
                axp20x_pek->irq_dbf, error);
        return error;
    }

    error = input_register_device(idev);
    if (error) {
        dev_err(&pdev->dev, "Can't register input device: %d\n",
@ -280,8 +252,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
        return error;
    }

    device_init_wakeup(&pdev->dev, true);

    return 0;
}

@ -339,6 +309,18 @@ static int axp20x_pek_probe(struct platform_device *pdev)

    axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);

    axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
    if (axp20x_pek->irq_dbr < 0)
        return axp20x_pek->irq_dbr;
    axp20x_pek->irq_dbr = regmap_irq_get_virq(
            axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr);

    axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
    if (axp20x_pek->irq_dbf < 0)
        return axp20x_pek->irq_dbf;
    axp20x_pek->irq_dbf = regmap_irq_get_virq(
            axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);

    if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
        error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
        if (error)
@ -347,6 +329,26 @@ static int axp20x_pek_probe(struct platform_device *pdev)

    axp20x_pek->info = (struct axp20x_info *)match->driver_data;

    error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
                                         axp20x_pek_irq, 0,
                                         "axp20x-pek-dbr", axp20x_pek);
    if (error < 0) {
        dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
                axp20x_pek->irq_dbr, error);
        return error;
    }

    error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
                                         axp20x_pek_irq, 0,
                                         "axp20x-pek-dbf", axp20x_pek);
    if (error < 0) {
        dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
                axp20x_pek->irq_dbf, error);
        return error;
    }

    device_init_wakeup(&pdev->dev, true);

    platform_set_drvdata(pdev, axp20x_pek);

    return 0;

@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
    "LEN005b", /* P50 */
    "LEN005e", /* T560 */
    "LEN006c", /* T470s */
    "LEN007a", /* T470s */
    "LEN0071", /* T480 */
    "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
    "LEN0073", /* X1 Carbon G5 (Elantech) */

@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)

    if (count) {
        kfree(attn_data.data);
        attn_data.data = NULL;
        drvdata->attn_data.data = NULL;
    }

    if (!kfifo_is_empty(&drvdata->attn_fifo))
@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev)
    if (data->input) {
        rmi_driver_set_input_name(rmi_dev, data->input);
        if (!rmi_dev->xport->input) {
            if (input_register_device(data->input)) {
            retval = input_register_device(data->input);
            if (retval) {
                dev_err(dev, "%s: Failed to register input device.\n",
                        __func__);
                goto err_destroy_functions;

@ -662,6 +662,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
            DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
        },
    },
    {
        /* Lenovo ThinkPad Twist S230u */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
            DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
        },
    },
    { }
};

@ -19,6 +19,7 @@
 */

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@ -73,6 +74,7 @@
#define FW_POS_STATE 1
#define FW_POS_TOTAL 2
#define FW_POS_XY 3
#define FW_POS_TOOL_TYPE 33
#define FW_POS_CHECKSUM 34
#define FW_POS_WIDTH 35
#define FW_POS_PRESSURE 45
@ -842,6 +844,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
{
    struct input_dev *input = ts->input;
    unsigned int n_fingers;
    unsigned int tool_type;
    u16 finger_state;
    int i;

@ -852,6 +855,10 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
    dev_dbg(&ts->client->dev,
            "n_fingers: %u, state: %04x\n", n_fingers, finger_state);

    /* Note: all fingers have the same tool type */
    tool_type = buf[FW_POS_TOOL_TYPE] & BIT(0) ?
                    MT_TOOL_FINGER : MT_TOOL_PALM;

    for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) {
        if (finger_state & 1) {
            unsigned int x, y, p, w;
@ -867,7 +874,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
                    i, x, y, p, w);

            input_mt_slot(input, i);
            input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
            input_mt_report_slot_state(input, tool_type, true);
            input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
            input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
            input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
@ -1307,6 +1314,8 @@ static int elants_i2c_probe(struct i2c_client *client,
    input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
    input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
    input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
    input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE,
                         0, MT_TOOL_PALM, 0, 0);
    input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
    input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
    input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);

@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg,
    if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
        BUG();

    /* Write register: use repeated start */
    /* Write register */
    xfer[0].addr = client->addr;
    xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
    xfer[0].flags = client->flags & I2C_M_TEN;
    xfer[0].len = 1;
    xfer[0].buf = &buf;

    /* Read data */
    xfer[1].addr = client->addr;
    xfer[1].flags = I2C_M_RD;
    xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
    xfer[1].len = len;
    xfer[1].buf = val;

@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client,
    const void *match_data;
    int error;

    if (!i2c_check_functionality(client->adapter,
                                 I2C_FUNC_PROTOCOL_MANGLING)) {
        dev_err(&client->dev,
                "Need i2c bus that supports protocol mangling\n");
    if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
        dev_err(&client->dev, "Not supported I2C adapter\n");
        return -ENODEV;
    }

@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = {
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
    {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
    {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
    {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
    {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},

@ -510,7 +510,7 @@ struct iommu_group *iommu_group_alloc(void)
                               NULL, "%d", group->id);
    if (ret) {
        ida_simple_remove(&iommu_group_ida, group->id);
        kfree(group);
        kobject_put(&group->kobj);
        return ERR_PTR(ret);
    }

@ -2484,8 +2484,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
    struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
                                              struct mmc_rpmb_data, chrdev);

    put_device(&rpmb->dev);
    mmc_blk_put(rpmb->md);
    put_device(&rpmb->dev);

    return 0;
}

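The RPMB hunk above reorders the release path so the member (rpmb->md) is used before the device reference that keeps the container alive is dropped. A minimal, generic sketch of that ordering rule, using an invented refcounted "box" type rather than the kernel structures:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted container: the final put frees the object,
 * so any member access must happen before that put. */
struct box {
    int refcount;
    int *payload;
};

static void box_put(struct box *b)
{
    if (--b->refcount == 0) {
        free(b->payload);
        free(b);
    }
}

int main(void)
{
    struct box *b = malloc(sizeof(*b));
    if (!b)
        return 1;
    b->refcount = 1;
    b->payload = malloc(sizeof(int));
    if (!b->payload) {
        free(b);
        return 1;
    }
    *b->payload = 42;

    /* Correct order: use the member first, then drop the reference.
     * Dropping the reference first would leave b->payload dangling. */
    printf("payload: %d\n", *b->payload);
    box_put(b);
    return 0;
}
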
@ -4000,9 +4000,6 @@ int sdhci_setup_host(struct sdhci_host *host)
               mmc_hostname(mmc), host->version);
    }

    if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
        mmc->caps2 &= ~MMC_CAP2_CQE;

    if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
        host->flags |= SDHCI_USE_SDMA;
    else if (!(host->caps & SDHCI_CAN_DO_SDMA))
@ -4539,6 +4536,12 @@ int __sdhci_add_host(struct sdhci_host *host)
    struct mmc_host *mmc = host->mmc;
    int ret;

    if ((mmc->caps2 & MMC_CAP2_CQE) &&
        (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
        mmc->caps2 &= ~MMC_CAP2_CQE;
        mmc->cqe_ops = NULL;
    }

    host->complete_wq = alloc_workqueue("sdhci", flags, 0);
    if (!host->complete_wq)
        return -ENOMEM;

@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave)

    err = kobject_init_and_add(&slave->kobj, &slave_ktype,
                               &(slave->dev->dev.kobj), "bonding_slave");
    if (err)
    if (err) {
        kobject_put(&slave->kobj);
        return err;
    }

    for (a = slave_attrs; *a; ++a) {
        err = sysfs_create_file(&slave->kobj, &((*a)->attr));

@ -103,13 +103,17 @@ static void felix_vlan_add(struct dsa_switch *ds, int port,
                           const struct switchdev_obj_port_vlan *vlan)
{
    struct ocelot *ocelot = ds->priv;
    u16 flags = vlan->flags;
    u16 vid;
    int err;

    if (dsa_is_cpu_port(ds, port))
        flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

    for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
        err = ocelot_vlan_add(ocelot, port, vid,
                              vlan->flags & BRIDGE_VLAN_INFO_PVID,
                              vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
                              flags & BRIDGE_VLAN_INFO_PVID,
                              flags & BRIDGE_VLAN_INFO_UNTAGGED);
        if (err) {
            dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
                    vid, port, err);

@ -4176,14 +4176,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
    int i, intr_process, rc, tmo_count;
    struct input *req = msg;
    u32 *data = msg;
    __le32 *resp_len;
    u8 *valid;
    u16 cp_ring_id, len = 0;
    struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
    u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
    struct hwrm_short_input short_input = {0};
    u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
    u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
    u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
    u16 dst = BNXT_HWRM_CHNL_CHIMP;

@ -4201,7 +4199,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        bar_offset = BNXT_GRCPF_REG_KONG_COMM;
        doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
        resp = bp->hwrm_cmd_kong_resp_addr;
        resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
    }

    memset(resp, 0, PAGE_SIZE);
@ -4270,7 +4267,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
    tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
    timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
    tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
    resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);

    if (intr_process) {
        u16 seq_id = bp->hwrm_intr_seq_id;
@ -4298,9 +4294,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                       le16_to_cpu(req->req_type));
            return -EBUSY;
        }
        len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
              HWRM_RESP_LEN_SFT;
        valid = resp_addr + len - 1;
        len = le16_to_cpu(resp->resp_len);
        valid = ((u8 *)resp) + len - 1;
    } else {
        int j;

@ -4311,8 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
             */
            if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
                return -EBUSY;
            len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
                  HWRM_RESP_LEN_SFT;
            len = le16_to_cpu(resp->resp_len);
            if (len)
                break;
            /* on first few passes, just barely sleep */
@ -4334,7 +4328,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        }

        /* Last byte of resp contains valid bit */
        valid = resp_addr + len - 1;
        valid = ((u8 *)resp) + len - 1;
        for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
            /* make sure we read from updated DMA memory */
            dma_rmb();
@ -9333,7 +9327,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
    bnxt_free_skbs(bp);

    /* Save ring stats before shutdown */
    if (bp->bnapi)
    if (bp->bnapi && irq_re_init)
        bnxt_get_ring_stats(bp, &bp->net_stats_prev);
    if (irq_re_init) {
        bnxt_free_irq(bp);

@ -659,11 +659,6 @@ struct nqe_cn {
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
#define HWRM_RESP_LEN_OFFSET 4
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
                                 BNXT_HWRM_REQ_MAX_SIZE)

@ -2119,11 +2119,12 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,

    bnxt_hwrm_fw_set_time(bp);

    if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
                             BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
                             &index, &item_len, NULL) != 0) {
    rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
                              BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
                              &index, &item_len, NULL);
    if (rc) {
        netdev_err(dev, "PKG update area not created in nvram\n");
        return -ENOBUFS;
        return rc;
    }

    rc = request_firmware(&fw, filename, &dev->dev);

@ -2914,7 +2914,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
    }

    /* Do this here, so we can be verbose early */
    SET_NETDEV_DEV(net_dev, dev);
    SET_NETDEV_DEV(net_dev, dev->parent);
    dev_set_drvdata(dev, net_dev);

    priv = netdev_priv(net_dev);

@ -4678,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
            dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
            break;
        }
        dev_info(dev, "Partner protocol version is %d\n",
                 crq->version_exchange_rsp.version);
        if (be16_to_cpu(crq->version_exchange_rsp.version) <
            ibmvnic_version)
            ibmvnic_version =
        ibmvnic_version =
            be16_to_cpu(crq->version_exchange_rsp.version);
        dev_info(dev, "Partner protocol version is %d\n",
                 ibmvnic_version);
        send_cap_queries(adapter);
        break;
    case QUERY_CAPABILITY_RSP:

@ -995,10 +995,12 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params);

@ -369,17 +369,19 @@ enum mlx5e_fec_supported_link_mode {
        *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
    } while (0)

#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
    do { \
        u16 *__policy = &(policy); \
        bool _write = (write); \
        \
        if (_write && *__policy) \
            *__policy = find_first_bit((u_long *)__policy, \
                                       sizeof(u16) * BITS_PER_BYTE);\
        MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
        if (!_write && *__policy) \
            *__policy = 1 << *__policy; \
#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
    do { \
        unsigned long policy_long; \
        u16 *__policy = &(policy); \
        bool _write = (write); \
        \
        policy_long = *__policy; \
        if (_write && *__policy) \
            *__policy = find_first_bit(&policy_long, \
                                       sizeof(policy_long) * BITS_PER_BYTE);\
        MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
        if (!_write && *__policy) \
            *__policy = 1 << *__policy; \
    } while (0)

/* get/set FEC admin field for a given speed */

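The 50G FEC macro change above stops passing a u16 through find_first_bit(), which walks unsigned long words and would therefore read past the two valid bytes; the new code copies the value into a properly sized local first. A standalone sketch of the same idea (find_first_bit_ul is a local stand-in written for this example, not the kernel helper):

#include <stdio.h>
#include <stdint.h>

/* Minimal bit scanner over an unsigned long bitmap, same contract as the
 * kernel's find_first_bit(): returns the index of the first set bit, or
 * nbits if none is set. */
static unsigned long find_first_bit_ul(const unsigned long *addr, unsigned long nbits)
{
    for (unsigned long i = 0; i < nbits; i++)
        if (addr[i / (8 * sizeof(unsigned long))] &
            (1UL << (i % (8 * sizeof(unsigned long)))))
            return i;
    return nbits;
}

int main(void)
{
    uint16_t policy = 1 << 3;   /* requested FEC mode, as a bit mask */

    /* Casting &policy to (unsigned long *) would make the scanner read
     * sizeof(long) bytes where only two are valid; widening into a local
     * unsigned long keeps the access in bounds. */
    unsigned long policy_long = policy;
    unsigned long bit = find_first_bit_ul(&policy_long,
                                          8 * sizeof(policy_long));

    printf("first set bit: %lu\n", bit);    /* prints 3 */
    return 0;
}
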
@ -527,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
    struct dim_cq_moder *rx_moder, *tx_moder;
    struct mlx5_core_dev *mdev = priv->mdev;
    struct mlx5e_channels new_channels = {};
    bool reset_rx, reset_tx;
    int err = 0;
    bool reset;

    if (!MLX5_CAP_GEN(mdev, cq_moderation))
        return -EOPNOTSUPP;
@ -566,15 +566,28 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
    }
    /* we are opened */

    reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
            (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
    reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
    reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;

    if (!reset) {
    if (!reset_rx && !reset_tx) {
        mlx5e_set_priv_channels_coalesce(priv, coal);
        priv->channels.params = new_channels.params;
        goto out;
    }

    if (reset_rx) {
        u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
                                  MLX5E_PFLAG_RX_CQE_BASED_MODER);

        mlx5e_reset_rx_moderation(&new_channels.params, mode);
    }
    if (reset_tx) {
        u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
                                  MLX5E_PFLAG_TX_CQE_BASED_MODER);

        mlx5e_reset_tx_moderation(&new_channels.params, mode);
    }

    err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);

out:
@ -665,11 +678,12 @@ static const u32 pplm_fec_2_ethtool_linkmodes[] = {
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
                                        struct ethtool_link_ksettings *link_ksettings)
{
    u_long active_fec = 0;
    unsigned long active_fec_long;
    u32 active_fec;
    u32 bitn;
    int err;

    err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL);
    err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
    if (err)
        return (err == -EOPNOTSUPP) ? 0 : err;

@ -682,10 +696,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
    MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
                                  ETHTOOL_LINK_MODE_FEC_LLRS_BIT);

    active_fec_long = active_fec;
    /* active fec is a bit set, find out which bit is set and
     * advertise the corresponding ethtool bit
     */
    bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE);
    bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE);
    if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
        __set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
                  link_ksettings->link_modes.advertising);
@ -1517,8 +1532,8 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
{
    struct mlx5e_priv *priv = netdev_priv(netdev);
    struct mlx5_core_dev *mdev = priv->mdev;
    u16 fec_configured = 0;
    u32 fec_active = 0;
    u16 fec_configured;
    u32 fec_active;
    int err;

    err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
@ -1526,14 +1541,14 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
    if (err)
        return err;

    fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
                                            sizeof(u32) * BITS_PER_BYTE);
    fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active,
                                            sizeof(unsigned long) * BITS_PER_BYTE);

    if (!fecparam->active_fec)
        return -EOPNOTSUPP;

    fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
                                     sizeof(u16) * BITS_PER_BYTE);
    fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured,
                                     sizeof(unsigned long) * BITS_PER_BYTE);

    return 0;
}

@ -4707,7 +4707,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
        DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
    if (params->tx_dim_enabled) {
        u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@ -4716,13 +4716,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
    } else {
        params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
    }

    MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
                    params->tx_cq_moderation.cq_period_mode ==
                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
    if (params->rx_dim_enabled) {
        u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@ -4731,7 +4727,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
    } else {
        params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
    }
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
    mlx5e_reset_tx_moderation(params, cq_period_mode);
    MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
                    params->tx_cq_moderation.cq_period_mode ==
                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
    mlx5e_reset_rx_moderation(params, cq_period_mode);
    MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
                    params->rx_cq_moderation.cq_period_mode ==
                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE);

@ -2153,7 +2153,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
    flow_rule_match_meta(rule, &match);
    if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
        NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
        return -EINVAL;
        return -EOPNOTSUPP;
    }

    ingress_dev = __dev_get_by_index(dev_net(filter_dev),
@ -2161,13 +2161,13 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
    if (!ingress_dev) {
        NL_SET_ERR_MSG_MOD(extack,
                           "Can't find the ingress port to match on");
        return -EINVAL;
        return -ENOENT;
    }

    if (ingress_dev != filter_dev) {
        NL_SET_ERR_MSG_MOD(extack,
                           "Can't match on the ingress filter port");
        return -EINVAL;
        return -EOPNOTSUPP;
    }

    return 0;
@ -4162,10 +4162,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
            if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "devices are not on same switch HW, can't offload forwarding");
                netdev_warn(priv->netdev,
                            "devices %s %s not on same switch HW, can't offload forwarding\n",
                            priv->netdev->name,
                            out_dev->name);
                return -EOPNOTSUPP;
            }

@ -4950,7 +4946,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
    dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
    dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
    rpriv->prev_vf_vport_stats = cur_stats;
    flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
    flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
                      FLOW_ACTION_HW_STATS_DELAYED);
}

@ -1544,6 +1544,22 @@ static void shutdown(struct pci_dev *pdev)
    mlx5_pci_disable_device(dev);
}

static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

    mlx5_unload_one(dev, false);

    return 0;
}

static int mlx5_resume(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

    return mlx5_load_one(dev, false);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
    { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
    { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@ -1587,6 +1603,8 @@ static struct pci_driver mlx5_core_driver = {
    .id_table = mlx5_core_pci_table,
    .probe = init_one,
    .remove = remove_one,
    .suspend = mlx5_suspend,
    .resume = mlx5_resume,
    .shutdown = shutdown,
    .err_handler = &mlx5_err_handler,
    .sriov_configure = mlx5_core_sriov_configure,

@ -1440,7 +1440,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
        ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
        priv->stats[ctx_id].pkts += pkts;
        priv->stats[ctx_id].bytes += bytes;
        max_t(u64, priv->stats[ctx_id].used, used);
        priv->stats[ctx_id].used = max_t(u64, used,
                                         priv->stats[ctx_id].used);
    }
}

@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
    ahw->diag_cnt = 0;
    ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
    if (ret)
        goto fail_diag_irq;
        goto fail_mbx_args;

    if (adapter->flags & QLCNIC_MSIX_ENABLED)
        intrpt_id = ahw->intr_tbl[0].id;
@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)

done:
    qlcnic_free_mbx_args(&cmd);

fail_mbx_args:
    qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);

fail_diag_irq:

@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
        ptp_v2 = PTP_TCR_TSVER2ENA;
        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
        ts_event_en = PTP_TCR_TSEVNTENA;
        if (priv->synopsys_id != DWMAC_CORE_5_10)
            ts_event_en = PTP_TCR_TSEVNTENA;
        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
        ptp_over_ethernet = PTP_TCR_TSIPENA;

@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = {
    {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
    {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
    {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
    {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
    {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
    {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
    {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */

@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev,
        memcpy(atr_res->gbi, atr_req->gbi, gb_len);
        r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
                                         gb_len);
        if (r < 0)
        if (r < 0) {
            kfree_skb(skb);
            return r;
        }
    }

    info->dep_info.curr_nfc_dep_pni = 0;

@ -1382,16 +1382,19 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)

/*
 * Called only on a device that has been disabled and after all other threads
 * that can check this device's completion queues have synced. This is the
 * last chance for the driver to see a natural completion before
 * nvme_cancel_request() terminates all incomplete requests.
 * that can check this device's completion queues have synced, except
 * nvme_poll(). This is the last chance for the driver to see a natural
 * completion before nvme_cancel_request() terminates all incomplete requests.
 */
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
    int i;

    for (i = dev->ctrl.queue_count - 1; i > 0; i--)
    for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
        spin_lock(&dev->queues[i].cq_poll_lock);
        nvme_process_cq(&dev->queues[i]);
        spin_unlock(&dev->queues[i].cq_poll_lock);
    }
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,

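The NVMe hunk above takes each queue's cq_poll_lock while reaping completions, because nvme_poll() can still walk the same completion queue concurrently. As a rough, generic sketch of serializing that walk with a lock (the struct and names below are invented for the illustration, not the driver's types):

#include <pthread.h>
#include <stdio.h>

struct fake_cq {
    pthread_mutex_t poll_lock;  /* stands in for cq_poll_lock */
    int pending;
};

static void process_cq(struct fake_cq *cq)
{
    cq->pending = 0;            /* consume whatever is outstanding */
}

static void reap_pending(struct fake_cq *cqs, int nr)
{
    for (int i = 0; i < nr; i++) {
        /* Same lock a concurrent poller would take around its walk. */
        pthread_mutex_lock(&cqs[i].poll_lock);
        process_cq(&cqs[i]);
        pthread_mutex_unlock(&cqs[i].poll_lock);
    }
}

int main(void)
{
    struct fake_cq cqs[2] = {
        { PTHREAD_MUTEX_INITIALIZER, 3 },
        { PTHREAD_MUTEX_INITIALIZER, 1 },
    };

    reap_pending(cqs, 2);
    printf("pending after reap: %d %d\n", cqs[0].pending, cqs[1].pending);
    return 0;
}
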
@ -351,7 +351,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
        spin_unlock_irqrestore(&client->lock, flags);
    }

    mbox_send_message(client->chan, pkt);
    err = mbox_send_message(client->chan, pkt);
    if (err < 0)
        return err;
    /* We can send next packet immediately, so just call txdone. */
    mbox_client_txdone(client->chan, 0);

@ -1733,7 +1733,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
            (!regset->active || regset->active(t->task, regset) > 0)) {
            int ret;
            size_t size = regset_size(t->task, regset);
            void *data = kmalloc(size, GFP_KERNEL);
            void *data = kzalloc(size, GFP_KERNEL);
            if (unlikely(!data))
                return 0;
            ret = regset->get(t->task, regset,

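The binfmt_elf hunk above switches the regset buffer from kmalloc() to kzalloc() so any portion the ->get() hook does not fill is zero rather than stale heap contents when it is written into the core dump. A small userspace analogue of that choice (calloc() versus malloc(); the buffer sizes here are arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    size_t size = 64;
    unsigned char *buf = calloc(1, size);   /* zero-filled, like kzalloc() */

    if (!buf)
        return 1;

    /* Pretend a producer only fills the first half of the buffer. */
    memset(buf, 0xab, size / 2);

    /* The tail is guaranteed to be zero, so copying the whole buffer out
     * (as the core-dump path does) cannot leak stale data. */
    printf("last byte: %u\n", buf[size - 1]);
    free(buf);
    return 0;
}
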
Some files were not shown because too many files have changed in this diff.