mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 00:10:51 +07:00
Networking fixes for 5.10-rc7, including fixes from bpf, netfilter,
wireless drivers, wireless mesh and can. Current release - regressions: - mt76: usb: fix crash on device removal Current release - always broken: - xsk: Fix umem cleanup from wrong context in socket destruct Previous release - regressions: - net: ip6_gre: set dev->hard_header_len when using header_ops - ipv4: Fix TOS mask in inet_rtm_getroute() - net, xsk: Avoid taking multiple skbuff references Previous release - always broken: - net/x25: prevent a couple of overflows - netfilter: ipset: prevent uninit-value in hash_ip6_add - geneve: pull IP header before ECN decapsulation - mpls: ensure LSE is pullable in TC and openvswitch paths - vxlan: respect needed_headroom of lower device - batman-adv: Consider fragmentation for needed packet headroom - can: drivers: don't count arbitration loss as an error - netfilter: bridge: reset skb->pkt_type after POST_ROUTING traversal - inet_ecn: Fix endianness of checksum update when setting ECT(1) - ibmvnic: fix various corner cases around reset handling - net/mlx5: fix rejecting unsupported Connect-X6DX SW steering - net/mlx5: Enforce HW TX csum offload with kTLS Signed-off-by: Jakub Kicinski <kuba@kernel.org> -----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAl/JS3sACgkQMUZtbf5S Irs7QA/9ELcJ2gklCJwrlVGXNhUddGpZH9OX2K3WL/c1ZzgARt3e0jkO88lY25Tk tXTRTelx7xzHUNBmXJhBx1Wj8H+S/5A1FLMdl3ZqkeFrvrYIUxSvnbRoFB0CALrV OXYtsd7P86BHrT5hQNGte9V5JV5LpYAUvH6+QSD7mWOzul0gtIcKEJ7claypYuRT hm+wt2ENSRU3bNNwOVG8SoA1CEFFXePfyqEr6cBTs+1/OyzYV4880LvJXVdwwOx0 DogwsPt5L53Y2uoOaFKVRr2SUVzOi9Y79FAX3rfqIqoi89xcbK6ihHsb4ldGxkAy ILZEU/Y4lB6YsdtJjGGrB7cPhiWOl0AzPYgmOczWHw/5LMzgWKEt6H/JvkjGSlQJ pXixi6/cmsQOS6o5ydQT9Iu5qLMOOduv2mmQmOPJHkq8/SgiYTuTUiJkXgL8pPv+ Mq4Qm4JL+6aB2WL0NNzlqjVnIbFQmmGdrYGWdQnSeTN6X4T/uFQIz4fSQlQmFils qw1MBLZfhgjc4npfC0j5LdcABhC0BwEGelTJBKnc6+MbZlDTv2NdzP7wldzpjalR /a0/hLHsDMCkft92BQ3jp0C1LSikSYAhBPRJLSQiQbxzBv5JnDr6S5WpBTtBoDKT LdEqlS+mo0GwRK3pm2vSHQ4iVJY9v0PV0SbeJXH/SlJGYieUqJc= =HskU -----END PGP SIGNATURE----- Merge tag 'net-5.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net Pull networking fixes from Jakub Kicinski: "Networking fixes for 5.10-rc7, including fixes from bpf, netfilter, wireless drivers, wireless mesh and can. 
Current release - regressions: - mt76: usb: fix crash on device removal Current release - always broken: - xsk: Fix umem cleanup from wrong context in socket destruct Previous release - regressions: - net: ip6_gre: set dev->hard_header_len when using header_ops - ipv4: Fix TOS mask in inet_rtm_getroute() - net, xsk: Avoid taking multiple skbuff references Previous release - always broken: - net/x25: prevent a couple of overflows - netfilter: ipset: prevent uninit-value in hash_ip6_add - geneve: pull IP header before ECN decapsulation - mpls: ensure LSE is pullable in TC and openvswitch paths - vxlan: respect needed_headroom of lower device - batman-adv: Consider fragmentation for needed packet headroom - can: drivers: don't count arbitration loss as an error - netfilter: bridge: reset skb->pkt_type after POST_ROUTING traversal - inet_ecn: Fix endianness of checksum update when setting ECT(1) - ibmvnic: fix various corner cases around reset handling - net/mlx5: fix rejecting unsupported Connect-X6DX SW steering - net/mlx5: Enforce HW TX csum offload with kTLS" * tag 'net-5.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (62 commits) net/mlx5: DR, Proper handling of unsupported Connect-X6DX SW steering net/mlx5e: kTLS, Enforce HW TX csum offload with kTLS net: mlx5e: fix fs_tcp.c build when IPV6 is not enabled net/mlx5: Fix wrong address reclaim when command interface is down net/sched: act_mpls: ensure LSE is pullable before reading it net: openvswitch: ensure LSE is pullable before reading it net: skbuff: ensure LSE is pullable before decrementing the MPLS ttl net: mvpp2: Fix error return code in mvpp2_open() chelsio/chtls: fix a double free in chtls_setkey() rtw88: debug: Fix uninitialized memory in debugfs code vxlan: fix error return code in __vxlan_dev_create() net: pasemi: fix error return code in pasemi_mac_open() cxgb3: fix error return code in t3_sge_alloc_qset() net/x25: prevent a couple of overflows dpaa_eth: copy timestamp fields to new skb in A-050385 workaround net: ip6_gre: set dev->hard_header_len when using header_ops mt76: usb: fix crash on device removal iwlwifi: pcie: add some missing entries for AX210 iwlwifi: pcie: invert values of NO_160 device config entries iwlwifi: pcie: add one missing entry for AX210 ...
This commit is contained in:
commit
bbe2ba04c5
@ -33,7 +33,7 @@ tcan4x5x: tcan4x5x@0 {
|
||||
spi-max-frequency = <10000000>;
|
||||
bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
|
||||
interrupt-parent = <&gpio1>;
|
||||
interrupts = <14 GPIO_ACTIVE_LOW>;
|
||||
interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
|
||||
device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
|
||||
device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
|
||||
reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
|
||||
|
@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
|
||||
clock-frequency = <100000>;
|
||||
|
||||
interrupt-parent = <&gpio1>;
|
||||
interrupts = <29 GPIO_ACTIVE_HIGH>;
|
||||
interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
|
||||
|
||||
enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
|
||||
firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
|
||||
|
@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2):
|
||||
clock-frequency = <400000>;
|
||||
|
||||
interrupt-parent = <&gpio1>;
|
||||
interrupts = <17 GPIO_ACTIVE_HIGH>;
|
||||
interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
|
||||
|
||||
enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
|
||||
firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
|
||||
|
26
MAINTAINERS
26
MAINTAINERS
@ -3357,6 +3357,17 @@ S: Supported
|
||||
F: arch/x86/net/
|
||||
X: arch/x86/net/bpf_jit_comp32.c
|
||||
|
||||
BPF LSM (Security Audit and Enforcement using BPF)
|
||||
M: KP Singh <kpsingh@chromium.org>
|
||||
R: Florent Revest <revest@chromium.org>
|
||||
R: Brendan Jackman <jackmanb@chromium.org>
|
||||
L: bpf@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/bpf/bpf_lsm.rst
|
||||
F: include/linux/bpf_lsm.h
|
||||
F: kernel/bpf/bpf_lsm.c
|
||||
F: security/bpf/
|
||||
|
||||
BROADCOM B44 10/100 ETHERNET DRIVER
|
||||
M: Michael Chan <michael.chan@broadcom.com>
|
||||
L: netdev@vger.kernel.org
|
||||
@ -9069,10 +9080,7 @@ S: Supported
|
||||
F: drivers/net/wireless/intel/iwlegacy/
|
||||
|
||||
INTEL WIRELESS WIFI LINK (iwlwifi)
|
||||
M: Johannes Berg <johannes.berg@intel.com>
|
||||
M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
|
||||
M: Luca Coelho <luciano.coelho@intel.com>
|
||||
M: Intel Linux Wireless <linuxwifi@intel.com>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
S: Supported
|
||||
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
|
||||
@ -19114,12 +19122,17 @@ L: netdev@vger.kernel.org
|
||||
L: bpf@vger.kernel.org
|
||||
S: Supported
|
||||
F: include/net/xdp.h
|
||||
F: include/net/xdp_priv.h
|
||||
F: include/trace/events/xdp.h
|
||||
F: kernel/bpf/cpumap.c
|
||||
F: kernel/bpf/devmap.c
|
||||
F: net/core/xdp.c
|
||||
N: xdp
|
||||
K: xdp
|
||||
F: samples/bpf/xdp*
|
||||
F: tools/testing/selftests/bpf/*xdp*
|
||||
F: tools/testing/selftests/bpf/*/*xdp*
|
||||
F: drivers/net/ethernet/*/*/*/*/*xdp*
|
||||
F: drivers/net/ethernet/*/*/*xdp*
|
||||
K: (?:\b|_)xdp(?:\b|_)
|
||||
|
||||
XDP SOCKETS (AF_XDP)
|
||||
M: Björn Töpel <bjorn.topel@intel.com>
|
||||
@ -19128,9 +19141,12 @@ R: Jonathan Lemon <jonathan.lemon@gmail.com>
|
||||
L: netdev@vger.kernel.org
|
||||
L: bpf@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/networking/af_xdp.rst
|
||||
F: include/net/xdp_sock*
|
||||
F: include/net/xsk_buff_pool.h
|
||||
F: include/uapi/linux/if_xdp.h
|
||||
F: include/uapi/linux/xdp_diag.h
|
||||
F: include/net/netns/xdp.h
|
||||
F: net/xdp/
|
||||
F: samples/bpf/xdpsock*
|
||||
F: tools/lib/bpf/xsk*
|
||||
|
@ -1295,12 +1295,22 @@ int c_can_power_up(struct net_device *dev)
|
||||
time_after(time_out, jiffies))
|
||||
cpu_relax();
|
||||
|
||||
if (time_after(jiffies, time_out))
|
||||
return -ETIMEDOUT;
|
||||
if (time_after(jiffies, time_out)) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
ret = c_can_start(dev);
|
||||
if (!ret)
|
||||
c_can_irq_control(priv, true);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
|
||||
c_can_irq_control(priv, true);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
c_can_reset_ram(priv, false);
|
||||
c_can_pm_runtime_put_sync(priv);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -692,8 +692,10 @@ static int kvaser_pciefd_open(struct net_device *netdev)
|
||||
return err;
|
||||
|
||||
err = kvaser_pciefd_bus_on(can);
|
||||
if (err)
|
||||
if (err) {
|
||||
close_candev(netdev);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -489,18 +489,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
|
||||
spi->bits_per_word = 32;
|
||||
ret = spi_setup(spi);
|
||||
if (ret)
|
||||
goto out_clk;
|
||||
goto out_m_can_class_free_dev;
|
||||
|
||||
priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
|
||||
&spi->dev, &tcan4x5x_regmap);
|
||||
if (IS_ERR(priv->regmap)) {
|
||||
ret = PTR_ERR(priv->regmap);
|
||||
goto out_clk;
|
||||
goto out_m_can_class_free_dev;
|
||||
}
|
||||
|
||||
ret = tcan4x5x_power_enable(priv->power, 1);
|
||||
if (ret)
|
||||
goto out_clk;
|
||||
goto out_m_can_class_free_dev;
|
||||
|
||||
ret = tcan4x5x_parse_config(mcan_class);
|
||||
if (ret)
|
||||
@ -519,11 +519,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
|
||||
|
||||
out_power:
|
||||
tcan4x5x_power_enable(priv->power, 0);
|
||||
out_clk:
|
||||
if (!IS_ERR(mcan_class->cclk)) {
|
||||
clk_disable_unprepare(mcan_class->cclk);
|
||||
clk_disable_unprepare(mcan_class->hclk);
|
||||
}
|
||||
out_m_can_class_free_dev:
|
||||
m_can_class_free_dev(mcan_class->net);
|
||||
dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
|
||||
|
@ -474,7 +474,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
|
||||
netdev_dbg(dev, "arbitration lost interrupt\n");
|
||||
alc = priv->read_reg(priv, SJA1000_ALC);
|
||||
priv->can.can_stats.arbitration_lost++;
|
||||
stats->tx_errors++;
|
||||
cf->can_id |= CAN_ERR_LOSTARB;
|
||||
cf->data[0] = alc & 0x1f;
|
||||
}
|
||||
|
@ -604,7 +604,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
|
||||
netdev_dbg(dev, "arbitration lost interrupt\n");
|
||||
alc = readl(priv->base + SUN4I_REG_STA_ADDR);
|
||||
priv->can.can_stats.arbitration_lost++;
|
||||
stats->tx_errors++;
|
||||
if (likely(skb)) {
|
||||
cf->can_id |= CAN_ERR_LOSTARB;
|
||||
cf->data[0] = (alc >> 8) & 0x1f;
|
||||
|
@ -88,6 +88,7 @@ config BNX2
|
||||
config CNIC
|
||||
tristate "QLogic CNIC support"
|
||||
depends on PCI && (IPV6 || IPV6=n)
|
||||
depends on MMU
|
||||
select BNX2
|
||||
select UIO
|
||||
help
|
||||
|
@ -3175,6 +3175,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
|
||||
GFP_KERNEL | __GFP_COMP);
|
||||
if (!avail) {
|
||||
CH_ALERT(adapter, "free list queue 0 initialization failed\n");
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
if (avail < q->fl[0].size)
|
||||
|
@ -1206,6 +1206,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
|
||||
sk_setup_caps(newsk, dst);
|
||||
ctx = tls_get_ctx(lsk);
|
||||
newsk->sk_destruct = ctx->sk_destruct;
|
||||
newsk->sk_prot_creator = lsk->sk_prot_creator;
|
||||
csk->sk = newsk;
|
||||
csk->passive_reap_next = oreq;
|
||||
csk->tx_chan = cxgb4_port_chan(ndev);
|
||||
|
@ -391,6 +391,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
|
||||
csk->wr_unacked += DIV_ROUND_UP(len, 16);
|
||||
enqueue_wr(csk, skb);
|
||||
cxgb4_ofld_send(csk->egress_dev, skb);
|
||||
skb = NULL;
|
||||
|
||||
chtls_set_scmd(csk);
|
||||
/* Clear quiesce for Rx key */
|
||||
|
@ -2120,6 +2120,15 @@ static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
|
||||
skb_copy_header(new_skb, skb);
|
||||
new_skb->dev = skb->dev;
|
||||
|
||||
/* Copy relevant timestamp info from the old skb to the new */
|
||||
if (priv->tx_tstamp) {
|
||||
skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
|
||||
skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
|
||||
skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
|
||||
if (skb->sk)
|
||||
skb_set_owner_w(new_skb, skb->sk);
|
||||
}
|
||||
|
||||
/* We move the headroom when we align it so we have to reset the
|
||||
* network and transport header offsets relative to the new data
|
||||
* pointer. The checksum offload relies on these offsets.
|
||||
@ -2127,7 +2136,6 @@ static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
|
||||
skb_set_network_header(new_skb, skb_network_offset(skb));
|
||||
skb_set_transport_header(new_skb, skb_transport_offset(skb));
|
||||
|
||||
/* TODO: does timestamping need the result in the old skb? */
|
||||
dev_kfree_skb(skb);
|
||||
*s = new_skb;
|
||||
|
||||
|
@ -834,7 +834,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
|
||||
static int ibmvnic_login(struct net_device *netdev)
|
||||
{
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
unsigned long timeout = msecs_to_jiffies(30000);
|
||||
unsigned long timeout = msecs_to_jiffies(20000);
|
||||
int retry_count = 0;
|
||||
int retries = 10;
|
||||
bool retry;
|
||||
@ -850,10 +850,8 @@ static int ibmvnic_login(struct net_device *netdev)
|
||||
adapter->init_done_rc = 0;
|
||||
reinit_completion(&adapter->init_done);
|
||||
rc = send_login(adapter);
|
||||
if (rc) {
|
||||
netdev_warn(netdev, "Unable to login\n");
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (!wait_for_completion_timeout(&adapter->init_done,
|
||||
timeout)) {
|
||||
@ -940,7 +938,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
|
||||
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
|
||||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
unsigned long timeout = msecs_to_jiffies(30000);
|
||||
unsigned long timeout = msecs_to_jiffies(20000);
|
||||
union ibmvnic_crq crq;
|
||||
bool resend;
|
||||
int rc;
|
||||
@ -1857,7 +1855,7 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
|
||||
if (reset_state == VNIC_OPEN) {
|
||||
rc = __ibmvnic_close(netdev);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
|
||||
release_resources(adapter);
|
||||
@ -1875,24 +1873,25 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
|
||||
}
|
||||
|
||||
rc = ibmvnic_reset_init(adapter, true);
|
||||
if (rc)
|
||||
return IBMVNIC_INIT_FAILED;
|
||||
if (rc) {
|
||||
rc = IBMVNIC_INIT_FAILED;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* If the adapter was in PROBE state prior to the reset,
|
||||
* exit here.
|
||||
*/
|
||||
if (reset_state == VNIC_PROBED)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
rc = ibmvnic_login(netdev);
|
||||
if (rc) {
|
||||
adapter->state = reset_state;
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = init_resources(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
|
||||
ibmvnic_disable_irqs(adapter);
|
||||
|
||||
@ -1902,8 +1901,10 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
|
||||
return 0;
|
||||
|
||||
rc = __ibmvnic_open(netdev);
|
||||
if (rc)
|
||||
return IBMVNIC_OPEN_FAILED;
|
||||
if (rc) {
|
||||
rc = IBMVNIC_OPEN_FAILED;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* refresh device's multicast list */
|
||||
ibmvnic_set_multi(netdev);
|
||||
@ -1912,7 +1913,10 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
|
||||
for (i = 0; i < adapter->req_rx_queues; i++)
|
||||
napi_schedule(&adapter->napi[i]);
|
||||
|
||||
return 0;
|
||||
out:
|
||||
if (rc)
|
||||
adapter->state = reset_state;
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2015,7 +2019,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
||||
|
||||
rc = ibmvnic_login(netdev);
|
||||
if (rc) {
|
||||
adapter->state = reset_state;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -2083,6 +2086,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
||||
rc = 0;
|
||||
|
||||
out:
|
||||
/* restore the adapter state if reset failed */
|
||||
if (rc)
|
||||
adapter->state = reset_state;
|
||||
rtnl_unlock();
|
||||
|
||||
return rc;
|
||||
@ -2115,43 +2121,46 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
|
||||
if (rc) {
|
||||
netdev_err(adapter->netdev,
|
||||
"Couldn't initialize crq. rc=%d\n", rc);
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = ibmvnic_reset_init(adapter, false);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
|
||||
/* If the adapter was in PROBE state prior to the reset,
|
||||
* exit here.
|
||||
*/
|
||||
if (reset_state == VNIC_PROBED)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
rc = ibmvnic_login(netdev);
|
||||
if (rc) {
|
||||
adapter->state = VNIC_PROBED;
|
||||
return 0;
|
||||
}
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
rc = init_resources(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
|
||||
ibmvnic_disable_irqs(adapter);
|
||||
adapter->state = VNIC_CLOSED;
|
||||
|
||||
if (reset_state == VNIC_CLOSED)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
rc = __ibmvnic_open(netdev);
|
||||
if (rc)
|
||||
return IBMVNIC_OPEN_FAILED;
|
||||
if (rc) {
|
||||
rc = IBMVNIC_OPEN_FAILED;
|
||||
goto out;
|
||||
}
|
||||
|
||||
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
|
||||
call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
|
||||
|
||||
return 0;
|
||||
out:
|
||||
/* restore adapter state if reset failed */
|
||||
if (rc)
|
||||
adapter->state = reset_state;
|
||||
return rc;
|
||||
}
|
||||
|
||||
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
|
||||
@ -2173,17 +2182,6 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
|
||||
return rwi;
|
||||
}
|
||||
|
||||
static void free_all_rwi(struct ibmvnic_adapter *adapter)
|
||||
{
|
||||
struct ibmvnic_rwi *rwi;
|
||||
|
||||
rwi = get_next_rwi(adapter);
|
||||
while (rwi) {
|
||||
kfree(rwi);
|
||||
rwi = get_next_rwi(adapter);
|
||||
}
|
||||
}
|
||||
|
||||
static void __ibmvnic_reset(struct work_struct *work)
|
||||
{
|
||||
struct ibmvnic_rwi *rwi;
|
||||
@ -2241,20 +2239,23 @@ static void __ibmvnic_reset(struct work_struct *work)
|
||||
rc = do_hard_reset(adapter, rwi, reset_state);
|
||||
rtnl_unlock();
|
||||
}
|
||||
if (rc) {
|
||||
/* give backing device time to settle down */
|
||||
netdev_dbg(adapter->netdev,
|
||||
"[S:%d] Hard reset failed, waiting 60 secs\n",
|
||||
adapter->state);
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
schedule_timeout(60 * HZ);
|
||||
}
|
||||
} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
|
||||
adapter->from_passive_init)) {
|
||||
rc = do_reset(adapter, rwi, reset_state);
|
||||
}
|
||||
kfree(rwi);
|
||||
if (rc == IBMVNIC_OPEN_FAILED) {
|
||||
if (list_empty(&adapter->rwi_list))
|
||||
adapter->state = VNIC_CLOSED;
|
||||
else
|
||||
adapter->state = reset_state;
|
||||
rc = 0;
|
||||
} else if (rc && rc != IBMVNIC_INIT_FAILED &&
|
||||
!adapter->force_reset_recovery)
|
||||
break;
|
||||
adapter->last_reset_time = jiffies;
|
||||
|
||||
if (rc)
|
||||
netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
|
||||
|
||||
rwi = get_next_rwi(adapter);
|
||||
|
||||
@ -2268,11 +2269,6 @@ static void __ibmvnic_reset(struct work_struct *work)
|
||||
complete(&adapter->reset_done);
|
||||
}
|
||||
|
||||
if (rc) {
|
||||
netdev_dbg(adapter->netdev, "Reset failed\n");
|
||||
free_all_rwi(adapter);
|
||||
}
|
||||
|
||||
clear_bit_unlock(0, &adapter->resetting);
|
||||
}
|
||||
|
||||
@ -2360,7 +2356,13 @@ static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
|
||||
"Adapter is resetting, skip timeout reset\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* No queuing up reset until at least 5 seconds (default watchdog val)
|
||||
* after last reset
|
||||
*/
|
||||
if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
|
||||
netdev_dbg(dev, "Not yet time to tx timeout.\n");
|
||||
return;
|
||||
}
|
||||
ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
|
||||
}
|
||||
|
||||
@ -2402,6 +2404,12 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
|
||||
break;
|
||||
/* The queue entry at the current index is peeked at above
|
||||
* to determine that there is a valid descriptor awaiting
|
||||
* processing. We want to be sure that the current slot
|
||||
* holds a valid descriptor before reading its contents.
|
||||
*/
|
||||
dma_rmb();
|
||||
next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
|
||||
rx_buff =
|
||||
(struct ibmvnic_rx_buff *)be64_to_cpu(next->
|
||||
@ -2860,15 +2868,26 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (!scrq) {
|
||||
netdev_dbg(adapter->netdev,
|
||||
"Invalid scrq reset. irq (%d) or msgs (%p).\n",
|
||||
scrq->irq, scrq->msgs);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (scrq->irq) {
|
||||
free_irq(scrq->irq, scrq);
|
||||
irq_dispose_mapping(scrq->irq);
|
||||
scrq->irq = 0;
|
||||
}
|
||||
|
||||
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
|
||||
atomic_set(&scrq->used, 0);
|
||||
scrq->cur = 0;
|
||||
if (scrq->msgs) {
|
||||
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
|
||||
atomic_set(&scrq->used, 0);
|
||||
scrq->cur = 0;
|
||||
} else {
|
||||
netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
|
||||
4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
|
||||
@ -3100,13 +3119,18 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
|
||||
unsigned int pool = scrq->pool_index;
|
||||
int num_entries = 0;
|
||||
|
||||
/* The queue entry at the current index is peeked at above
|
||||
* to determine that there is a valid descriptor awaiting
|
||||
* processing. We want to be sure that the current slot
|
||||
* holds a valid descriptor before reading its contents.
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
next = ibmvnic_next_scrq(adapter, scrq);
|
||||
for (i = 0; i < next->tx_comp.num_comps; i++) {
|
||||
if (next->tx_comp.rcs[i]) {
|
||||
if (next->tx_comp.rcs[i])
|
||||
dev_err(dev, "tx error %x\n",
|
||||
next->tx_comp.rcs[i]);
|
||||
continue;
|
||||
}
|
||||
index = be32_to_cpu(next->tx_comp.correlators[i]);
|
||||
if (index & IBMVNIC_TSO_POOL_MASK) {
|
||||
tx_pool = &adapter->tso_pool[pool];
|
||||
@ -3500,6 +3524,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
|
||||
}
|
||||
spin_unlock_irqrestore(&scrq->lock, flags);
|
||||
|
||||
/* Ensure that the entire buffer descriptor has been
|
||||
* loaded before reading its contents
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
@ -3721,15 +3750,16 @@ static int send_login(struct ibmvnic_adapter *adapter)
|
||||
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
|
||||
struct ibmvnic_login_buffer *login_buffer;
|
||||
struct device *dev = &adapter->vdev->dev;
|
||||
struct vnic_login_client_data *vlcd;
|
||||
dma_addr_t rsp_buffer_token;
|
||||
dma_addr_t buffer_token;
|
||||
size_t rsp_buffer_size;
|
||||
union ibmvnic_crq crq;
|
||||
int client_data_len;
|
||||
size_t buffer_size;
|
||||
__be64 *tx_list_p;
|
||||
__be64 *rx_list_p;
|
||||
int client_data_len;
|
||||
struct vnic_login_client_data *vlcd;
|
||||
int rc;
|
||||
int i;
|
||||
|
||||
if (!adapter->tx_scrq || !adapter->rx_scrq) {
|
||||
@ -3833,16 +3863,25 @@ static int send_login(struct ibmvnic_adapter *adapter)
|
||||
crq.login.cmd = LOGIN;
|
||||
crq.login.ioba = cpu_to_be32(buffer_token);
|
||||
crq.login.len = cpu_to_be32(buffer_size);
|
||||
ibmvnic_send_crq(adapter, &crq);
|
||||
|
||||
adapter->login_pending = true;
|
||||
rc = ibmvnic_send_crq(adapter, &crq);
|
||||
if (rc) {
|
||||
adapter->login_pending = false;
|
||||
netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
|
||||
goto buf_rsp_map_failed;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
buf_rsp_map_failed:
|
||||
kfree(login_rsp_buffer);
|
||||
adapter->login_rsp_buf = NULL;
|
||||
buf_rsp_alloc_failed:
|
||||
dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
|
||||
buf_map_failed:
|
||||
kfree(login_buffer);
|
||||
adapter->login_buf = NULL;
|
||||
buf_alloc_failed:
|
||||
return -1;
|
||||
}
|
||||
@ -4385,6 +4424,15 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
|
||||
u64 *size_array;
|
||||
int i;
|
||||
|
||||
/* CHECK: Test/set of login_pending does not need to be atomic
|
||||
* because only ibmvnic_tasklet tests/clears this.
|
||||
*/
|
||||
if (!adapter->login_pending) {
|
||||
netdev_warn(netdev, "Ignoring unexpected login response\n");
|
||||
return 0;
|
||||
}
|
||||
adapter->login_pending = false;
|
||||
|
||||
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
|
||||
DMA_TO_DEVICE);
|
||||
dma_unmap_single(dev, adapter->login_rsp_buf_token,
|
||||
@ -4414,7 +4462,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
|
||||
adapter->req_rx_add_queues !=
|
||||
be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
|
||||
dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
|
||||
ibmvnic_remove(adapter->vdev);
|
||||
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
|
||||
return -EIO;
|
||||
}
|
||||
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
|
||||
@ -4756,6 +4804,11 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
|
||||
case IBMVNIC_CRQ_INIT:
|
||||
dev_info(dev, "Partner initialized\n");
|
||||
adapter->from_passive_init = true;
|
||||
/* Discard any stale login responses from prev reset.
|
||||
* CHECK: should we clear even on INIT_COMPLETE?
|
||||
*/
|
||||
adapter->login_pending = false;
|
||||
|
||||
if (!completion_done(&adapter->init_done)) {
|
||||
complete(&adapter->init_done);
|
||||
adapter->init_done_rc = -EIO;
|
||||
@ -5093,7 +5146,7 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
|
||||
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
|
||||
{
|
||||
struct device *dev = &adapter->vdev->dev;
|
||||
unsigned long timeout = msecs_to_jiffies(30000);
|
||||
unsigned long timeout = msecs_to_jiffies(20000);
|
||||
u64 old_num_rx_queues, old_num_tx_queues;
|
||||
int rc;
|
||||
|
||||
@ -5188,6 +5241,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
|
||||
dev_set_drvdata(&dev->dev, netdev);
|
||||
adapter->vdev = dev;
|
||||
adapter->netdev = netdev;
|
||||
adapter->login_pending = false;
|
||||
|
||||
ether_addr_copy(adapter->mac_addr, mac_addr_p);
|
||||
ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
|
||||
@ -5251,7 +5305,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
|
||||
adapter->state = VNIC_PROBED;
|
||||
|
||||
adapter->wait_for_reset = false;
|
||||
|
||||
adapter->last_reset_time = jiffies;
|
||||
return 0;
|
||||
|
||||
ibmvnic_register_fail:
|
||||
|
@ -1086,6 +1086,9 @@ struct ibmvnic_adapter {
|
||||
struct delayed_work ibmvnic_delayed_reset;
|
||||
unsigned long resetting;
|
||||
bool napi_enabled, from_passive_init;
|
||||
bool login_pending;
|
||||
/* last device reset time */
|
||||
unsigned long last_reset_time;
|
||||
|
||||
bool failover_pending;
|
||||
bool force_reset_recovery;
|
||||
|
@ -4426,6 +4426,7 @@ static int mvpp2_open(struct net_device *dev)
|
||||
if (!valid) {
|
||||
netdev_err(port->dev,
|
||||
"invalid configuration: no dt or link IRQ");
|
||||
err = -ENOENT;
|
||||
goto err_free_irq;
|
||||
}
|
||||
|
||||
|
@ -44,6 +44,7 @@ static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock
|
||||
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
|
||||
{
|
||||
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
|
||||
@ -63,6 +64,7 @@ static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock
|
||||
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
|
||||
0xff, 16);
|
||||
}
|
||||
#endif
|
||||
|
||||
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
|
||||
{
|
||||
|
@ -161,7 +161,9 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
static inline void
|
||||
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
|
||||
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
struct mlx5e_accel_tx_state *accel,
|
||||
struct mlx5_wqe_eth_seg *eseg)
|
||||
{
|
||||
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
||||
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
|
||||
@ -173,6 +175,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
|
||||
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
|
||||
sq->stats->csum_partial++;
|
||||
}
|
||||
#ifdef CONFIG_MLX5_EN_TLS
|
||||
} else if (unlikely(accel && accel->tls.tls_tisn)) {
|
||||
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
|
||||
sq->stats->csum_partial++;
|
||||
#endif
|
||||
} else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
|
||||
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
|
||||
|
||||
@ -607,12 +614,13 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
|
||||
}
|
||||
|
||||
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
|
||||
struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
|
||||
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
|
||||
struct mlx5_wqe_eth_seg *eseg)
|
||||
{
|
||||
if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
|
||||
return false;
|
||||
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -639,7 +647,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
|
||||
struct mlx5_wqe_eth_seg eseg = {};
|
||||
|
||||
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg)))
|
||||
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
|
||||
@ -656,7 +664,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
/* May update the WQE, but may not post other WQEs. */
|
||||
mlx5e_accel_tx_finish(sq, wqe, &accel,
|
||||
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
|
||||
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
|
||||
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
|
||||
@ -675,7 +683,7 @@ void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit
|
||||
mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
|
||||
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
|
||||
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth);
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
|
||||
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
|
||||
}
|
||||
|
||||
@ -944,7 +952,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
|
||||
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
|
||||
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
|
||||
|
||||
eseg->mss = attr.mss;
|
||||
|
||||
|
@ -422,6 +422,24 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
|
||||
npages, ec_function, func_id);
|
||||
}
|
||||
|
||||
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
|
||||
u32 npages)
|
||||
{
|
||||
u32 pages_set = 0;
|
||||
unsigned int n;
|
||||
|
||||
for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
|
||||
MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
|
||||
fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
|
||||
pages_set++;
|
||||
|
||||
if (!--npages)
|
||||
break;
|
||||
}
|
||||
|
||||
return pages_set;
|
||||
}
|
||||
|
||||
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
|
||||
u32 *in, int in_size, u32 *out, int out_size)
|
||||
{
|
||||
@ -448,8 +466,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
|
||||
fwp = rb_entry(p, struct fw_page, rb_node);
|
||||
p = rb_next(p);
|
||||
|
||||
MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
|
||||
i++;
|
||||
i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
|
||||
}
|
||||
|
||||
MLX5_SET(manage_pages_out, out, output_num_entries, i);
|
||||
|
@ -92,6 +92,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
|
||||
caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
|
||||
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
|
||||
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
|
||||
caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
|
||||
|
||||
if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
|
||||
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
|
||||
|
@ -223,6 +223,11 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
|
||||
mlx5dr_err(dmn, "SW steering is not supported on this device\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
ret = dr_domain_query_fdb_caps(mdev, dmn);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -625,6 +625,7 @@ struct mlx5dr_cmd_caps {
|
||||
u8 max_ft_level;
|
||||
u16 roce_min_src_udp;
|
||||
u8 num_esw_ports;
|
||||
u8 sw_format_ver;
|
||||
bool eswitch_manager;
|
||||
bool rx_sw_owner;
|
||||
bool tx_sw_owner;
|
||||
|
@ -1078,16 +1078,20 @@ static int pasemi_mac_open(struct net_device *dev)
|
||||
|
||||
mac->tx = pasemi_mac_setup_tx_resources(dev);
|
||||
|
||||
if (!mac->tx)
|
||||
if (!mac->tx) {
|
||||
ret = -ENOMEM;
|
||||
goto out_tx_ring;
|
||||
}
|
||||
|
||||
/* We might already have allocated rings in case mtu was changed
|
||||
* before interface was brought up.
|
||||
*/
|
||||
if (dev->mtu > 1500 && !mac->num_cs) {
|
||||
pasemi_mac_setup_csrings(mac);
|
||||
if (!mac->num_cs)
|
||||
if (!mac->num_cs) {
|
||||
ret = -ENOMEM;
|
||||
goto out_tx_ring;
|
||||
}
|
||||
}
|
||||
|
||||
/* Zero out rmon counters */
|
||||
|
@ -257,11 +257,21 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
|
||||
skb_dst_set(skb, &tun_dst->dst);
|
||||
|
||||
/* Ignore packet loops (and multicast echo) */
|
||||
if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
|
||||
geneve->dev->stats.rx_errors++;
|
||||
goto drop;
|
||||
}
|
||||
if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
|
||||
goto rx_error;
|
||||
|
||||
switch (skb_protocol(skb, true)) {
|
||||
case htons(ETH_P_IP):
|
||||
if (pskb_may_pull(skb, sizeof(struct iphdr)))
|
||||
goto rx_error;
|
||||
break;
|
||||
case htons(ETH_P_IPV6):
|
||||
if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
|
||||
goto rx_error;
|
||||
break;
|
||||
default:
|
||||
goto rx_error;
|
||||
}
|
||||
oiph = skb_network_header(skb);
|
||||
skb_reset_network_header(skb);
|
||||
|
||||
@ -298,6 +308,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
|
||||
dev_sw_netstats_rx_add(geneve->dev, len);
|
||||
|
||||
return;
|
||||
rx_error:
|
||||
geneve->dev->stats.rx_errors++;
|
||||
drop:
|
||||
/* Consume bad packet */
|
||||
kfree_skb(skb);
|
||||
|
@ -3798,6 +3798,9 @@ static void vxlan_config_apply(struct net_device *dev,
|
||||
dev->gso_max_segs = lowerdev->gso_max_segs;
|
||||
|
||||
needed_headroom = lowerdev->hard_header_len;
|
||||
needed_headroom += lowerdev->needed_headroom;
|
||||
|
||||
dev->needed_tailroom = lowerdev->needed_tailroom;
|
||||
|
||||
max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
|
||||
VXLAN_HEADROOM);
|
||||
@ -3877,8 +3880,10 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
|
||||
|
||||
if (dst->remote_ifindex) {
|
||||
remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
|
||||
if (!remote_dev)
|
||||
if (!remote_dev) {
|
||||
err = -ENODEV;
|
||||
goto errout;
|
||||
}
|
||||
|
||||
err = netdev_upper_dev_link(remote_dev, dev, extack);
|
||||
if (err)
|
||||
|
@ -491,8 +491,8 @@ struct iwl_cfg {
|
||||
#define IWL_CFG_RF_ID_HR 0x7
|
||||
#define IWL_CFG_RF_ID_HR1 0x4
|
||||
|
||||
#define IWL_CFG_NO_160 0x0
|
||||
#define IWL_CFG_160 0x1
|
||||
#define IWL_CFG_NO_160 0x1
|
||||
#define IWL_CFG_160 0x0
|
||||
|
||||
#define IWL_CFG_CORES_BT 0x0
|
||||
#define IWL_CFG_CORES_BT_GNSS 0x5
|
||||
|
@ -536,9 +536,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
|
||||
{IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0)},
|
||||
{IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
|
||||
{IWL_PCI_DEVICE(0x2726, 0x0070, iwlax201_cfg_snj_hr_b0)},
|
||||
{IWL_PCI_DEVICE(0x2726, 0x0074, iwlax201_cfg_snj_hr_b0)},
|
||||
|
@ -1020,8 +1020,6 @@ void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mt76_worker_disable(&dev->tx_worker);
|
||||
|
||||
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
|
||||
HZ / 5);
|
||||
if (!ret) {
|
||||
@ -1040,6 +1038,8 @@ void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
usb_kill_urb(q->entry[j].urb);
|
||||
}
|
||||
|
||||
mt76_worker_disable(&dev->tx_worker);
|
||||
|
||||
/* On device removal we maight queue skb's, but mt76u_tx_kick()
|
||||
* will fail to submit urb, cleanup those skb's manually.
|
||||
*/
|
||||
@ -1048,18 +1048,19 @@ void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
if (!q)
|
||||
continue;
|
||||
|
||||
entry = q->entry[q->tail];
|
||||
q->entry[q->tail].done = false;
|
||||
|
||||
mt76_queue_tx_complete(dev, q, &entry);
|
||||
while (q->queued > 0) {
|
||||
entry = q->entry[q->tail];
|
||||
q->entry[q->tail].done = false;
|
||||
mt76_queue_tx_complete(dev, q, &entry);
|
||||
}
|
||||
}
|
||||
|
||||
mt76_worker_enable(&dev->tx_worker);
|
||||
}
|
||||
|
||||
cancel_work_sync(&dev->usb.stat_work);
|
||||
clear_bit(MT76_READING_STATS, &dev->phy.state);
|
||||
|
||||
mt76_worker_enable(&dev->tx_worker);
|
||||
|
||||
mt76_tx_status_check(dev, NULL, true);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
|
||||
|
@ -147,6 +147,8 @@ static int rtw_debugfs_copy_from_user(char tmp[], int size,
|
||||
{
|
||||
int tmp_len;
|
||||
|
||||
memset(tmp, 0, size);
|
||||
|
||||
if (count < num)
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -1223,6 +1223,11 @@ enum mlx5_fc_bulk_alloc_bitmask {
|
||||
|
||||
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
|
||||
|
||||
enum {
|
||||
MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
|
||||
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
|
||||
};
|
||||
|
||||
struct mlx5_ifc_cmd_hca_cap_bits {
|
||||
u8 reserved_at_0[0x30];
|
||||
u8 vhca_id[0x10];
|
||||
@ -1521,7 +1526,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
|
||||
|
||||
u8 general_obj_types[0x40];
|
||||
|
||||
u8 reserved_at_440[0x20];
|
||||
u8 reserved_at_440[0x4];
|
||||
u8 steering_format_version[0x4];
|
||||
u8 create_qp_start_hint[0x18];
|
||||
|
||||
u8 reserved_at_460[0x3];
|
||||
u8 log_max_uctx[0x5];
|
||||
|
@ -2813,9 +2813,21 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
|
||||
struct net_device *sb_dev);
|
||||
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
|
||||
struct net_device *sb_dev);
|
||||
|
||||
int dev_queue_xmit(struct sk_buff *skb);
|
||||
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
|
||||
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
|
||||
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
|
||||
|
||||
static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = __dev_direct_xmit(skb, queue_id);
|
||||
if (!dev_xmit_complete(ret))
|
||||
kfree_skb(skb);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int register_netdevice(struct net_device *dev);
|
||||
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
|
||||
void unregister_netdevice_many(struct list_head *head);
|
||||
|
@ -107,7 +107,7 @@ static inline int IP_ECN_set_ect1(struct iphdr *iph)
|
||||
if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
|
||||
return 0;
|
||||
|
||||
check += (__force u16)htons(0x100);
|
||||
check += (__force u16)htons(0x1);
|
||||
|
||||
iph->check = (__force __sum16)(check + (check>=0xFFFF));
|
||||
iph->tos ^= INET_ECN_MASK;
|
||||
|
@ -37,6 +37,7 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
|
||||
|
||||
struct nft_flow_key {
|
||||
struct flow_dissector_key_basic basic;
|
||||
struct flow_dissector_key_control control;
|
||||
union {
|
||||
struct flow_dissector_key_ipv4_addrs ipv4;
|
||||
struct flow_dissector_key_ipv6_addrs ipv6;
|
||||
@ -62,6 +63,9 @@ struct nft_flow_rule {
|
||||
|
||||
#define NFT_OFFLOAD_F_ACTION (1 << 0)
|
||||
|
||||
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
|
||||
enum flow_dissector_key_id addr_type);
|
||||
|
||||
struct nft_rule;
|
||||
struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
|
||||
void nft_flow_rule_destroy(struct nft_flow_rule *flow);
|
||||
@ -74,6 +78,9 @@ int nft_flow_rule_offload_commit(struct net *net);
|
||||
offsetof(struct nft_flow_key, __base.__field); \
|
||||
(__reg)->len = __len; \
|
||||
(__reg)->key = __key; \
|
||||
|
||||
#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \
|
||||
NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
|
||||
memset(&(__reg)->mask, 0xff, (__reg)->len);
|
||||
|
||||
int nft_chain_offload_priority(struct nft_base_chain *basechain);
|
||||
|
@ -31,6 +31,7 @@ struct xdp_umem {
|
||||
struct page **pgs;
|
||||
int id;
|
||||
struct list_head xsk_dma_list;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
struct xsk_map {
|
||||
|
@ -391,6 +391,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
|
||||
|
||||
/**
|
||||
* batadv_frag_create() - create a fragment from skb
|
||||
* @net_dev: outgoing device for fragment
|
||||
* @skb: skb to create fragment from
|
||||
* @frag_head: header to use in new fragment
|
||||
* @fragment_size: size of new fragment
|
||||
@ -401,22 +402,25 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
|
||||
*
|
||||
* Return: the new fragment, NULL on error.
|
||||
*/
|
||||
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
|
||||
static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
|
||||
struct sk_buff *skb,
|
||||
struct batadv_frag_packet *frag_head,
|
||||
unsigned int fragment_size)
|
||||
{
|
||||
unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
|
||||
unsigned int tailroom = net_dev->needed_tailroom;
|
||||
struct sk_buff *skb_fragment;
|
||||
unsigned int header_size = sizeof(*frag_head);
|
||||
unsigned int mtu = fragment_size + header_size;
|
||||
|
||||
skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
|
||||
skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
|
||||
if (!skb_fragment)
|
||||
goto err;
|
||||
|
||||
skb_fragment->priority = skb->priority;
|
||||
|
||||
/* Eat the last mtu-bytes of the skb */
|
||||
skb_reserve(skb_fragment, header_size + ETH_HLEN);
|
||||
skb_reserve(skb_fragment, ll_reserved + header_size);
|
||||
skb_split(skb, skb_fragment, skb->len - fragment_size);
|
||||
|
||||
/* Add the header */
|
||||
@ -439,11 +443,12 @@ int batadv_frag_send_packet(struct sk_buff *skb,
|
||||
struct batadv_orig_node *orig_node,
|
||||
struct batadv_neigh_node *neigh_node)
|
||||
{
|
||||
struct net_device *net_dev = neigh_node->if_incoming->net_dev;
|
||||
struct batadv_priv *bat_priv;
|
||||
struct batadv_hard_iface *primary_if = NULL;
|
||||
struct batadv_frag_packet frag_header;
|
||||
struct sk_buff *skb_fragment;
|
||||
unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
|
||||
unsigned int mtu = net_dev->mtu;
|
||||
unsigned int header_size = sizeof(frag_header);
|
||||
unsigned int max_fragment_size, num_fragments;
|
||||
int ret;
|
||||
@ -503,7 +508,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
|
||||
goto put_primary_if;
|
||||
}
|
||||
|
||||
skb_fragment = batadv_frag_create(skb, &frag_header,
|
||||
skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
|
||||
max_fragment_size);
|
||||
if (!skb_fragment) {
|
||||
ret = -ENOMEM;
|
||||
@ -522,13 +527,14 @@ int batadv_frag_send_packet(struct sk_buff *skb,
|
||||
frag_header.no++;
|
||||
}
|
||||
|
||||
/* Make room for the fragment header. */
|
||||
if (batadv_skb_head_push(skb, header_size) < 0 ||
|
||||
pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
|
||||
ret = -ENOMEM;
|
||||
/* make sure that there is at least enough head for the fragmentation
|
||||
* and ethernet headers
|
||||
*/
|
||||
ret = skb_cow_head(skb, ETH_HLEN + header_size);
|
||||
if (ret < 0)
|
||||
goto put_primary_if;
|
||||
}
|
||||
|
||||
skb_push(skb, header_size);
|
||||
memcpy(skb->data, &frag_header, header_size);
|
||||
|
||||
/* Send the last fragment */
|
||||
|
@ -554,6 +554,9 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
|
||||
needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
|
||||
needed_headroom += batadv_max_header_len();
|
||||
|
||||
/* fragmentation headers don't strip the unicast/... header */
|
||||
needed_headroom += sizeof(struct batadv_frag_packet);
|
||||
|
||||
soft_iface->needed_headroom = needed_headroom;
|
||||
soft_iface->needed_tailroom = lower_tailroom;
|
||||
}
|
||||
|
@ -735,6 +735,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
|
||||
mtu_reserved = nf_bridge_mtu_reduction(skb);
|
||||
mtu = skb->dev->mtu;
|
||||
|
||||
if (nf_bridge->pkt_otherhost) {
|
||||
skb->pkt_type = PACKET_OTHERHOST;
|
||||
nf_bridge->pkt_otherhost = false;
|
||||
}
|
||||
|
||||
if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
|
||||
mtu = nf_bridge->frag_max_size;
|
||||
|
||||
@ -835,8 +840,6 @@ static unsigned int br_nf_post_routing(void *priv,
|
||||
else
|
||||
return NF_ACCEPT;
|
||||
|
||||
/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
|
||||
* about the value of skb->pkt_type. */
|
||||
if (skb->pkt_type == PACKET_OTHERHOST) {
|
||||
skb->pkt_type = PACKET_HOST;
|
||||
nf_bridge->pkt_otherhost = true;
|
||||
|
@ -4180,7 +4180,7 @@ int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
|
||||
}
|
||||
EXPORT_SYMBOL(dev_queue_xmit_accel);
|
||||
|
||||
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
|
||||
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
|
||||
{
|
||||
struct net_device *dev = skb->dev;
|
||||
struct sk_buff *orig_skb = skb;
|
||||
@ -4210,17 +4210,13 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
|
||||
dev_xmit_recursion_dec();
|
||||
|
||||
local_bh_enable();
|
||||
|
||||
if (!dev_xmit_complete(ret))
|
||||
kfree_skb(skb);
|
||||
|
||||
return ret;
|
||||
drop:
|
||||
atomic_long_inc(&dev->tx_dropped);
|
||||
kfree_skb_list(skb);
|
||||
return NET_XMIT_DROP;
|
||||
}
|
||||
EXPORT_SYMBOL(dev_direct_xmit);
|
||||
EXPORT_SYMBOL(__dev_direct_xmit);
|
||||
|
||||
/*************************************************************************
|
||||
* Receiver routines
|
||||
|
@ -5786,6 +5786,9 @@ int skb_mpls_dec_ttl(struct sk_buff *skb)
|
||||
if (unlikely(!eth_p_mpls(skb->protocol)))
|
||||
return -EINVAL;
|
||||
|
||||
if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
|
||||
return -ENOMEM;
|
||||
|
||||
lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
|
||||
ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
|
||||
if (!--ttl)
|
||||
|
@ -3222,7 +3222,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
|
||||
|
||||
fl4.daddr = dst;
|
||||
fl4.saddr = src;
|
||||
fl4.flowi4_tos = rtm->rtm_tos;
|
||||
fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
|
||||
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
|
||||
fl4.flowi4_mark = mark;
|
||||
fl4.flowi4_uid = uid;
|
||||
@ -3246,8 +3246,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
|
||||
fl4.flowi4_iif = iif; /* for rt_fill_info */
|
||||
skb->dev = dev;
|
||||
skb->mark = mark;
|
||||
err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
|
||||
dev, &res);
|
||||
err = ip_route_input_rcu(skb, dst, src,
|
||||
rtm->rtm_tos & IPTOS_RT_MASK, dev,
|
||||
&res);
|
||||
|
||||
rt = skb_rtable(skb);
|
||||
if (err == 0 && rt->dst.error)
|
||||
|
@ -1133,8 +1133,13 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
|
||||
return;
|
||||
|
||||
if (rt->dst.dev) {
|
||||
dev->needed_headroom = rt->dst.dev->hard_header_len +
|
||||
t_hlen;
|
||||
unsigned short dst_len = rt->dst.dev->hard_header_len +
|
||||
t_hlen;
|
||||
|
||||
if (t->dev->header_ops)
|
||||
dev->hard_header_len = dst_len;
|
||||
else
|
||||
dev->needed_headroom = dst_len;
|
||||
|
||||
if (set_mtu) {
|
||||
dev->mtu = rt->dst.dev->mtu - t_hlen;
|
||||
@ -1159,7 +1164,12 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
|
||||
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
|
||||
|
||||
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
|
||||
tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
|
||||
|
||||
if (tunnel->dev->header_ops)
|
||||
tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
|
||||
else
|
||||
tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
|
||||
|
||||
return t_hlen;
|
||||
}
|
||||
|
||||
|
@ -271,8 +271,7 @@ flag_nested(const struct nlattr *nla)
|
||||
|
||||
static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
|
||||
[IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
|
||||
[IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY,
|
||||
.len = sizeof(struct in6_addr) },
|
||||
[IPSET_ATTR_IPADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
|
||||
};
|
||||
|
||||
int
|
||||
|
@ -4167,12 +4167,18 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
|
||||
|
||||
spin_lock_init(&ipvs->tot_stats.lock);
|
||||
|
||||
proc_create_net("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_seq_ops,
|
||||
sizeof(struct ip_vs_iter));
|
||||
proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
|
||||
ip_vs_stats_show, NULL);
|
||||
proc_create_net_single("ip_vs_stats_percpu", 0, ipvs->net->proc_net,
|
||||
ip_vs_stats_percpu_show, NULL);
|
||||
#ifdef CONFIG_PROC_FS
|
||||
if (!proc_create_net("ip_vs", 0, ipvs->net->proc_net,
|
||||
&ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)))
|
||||
goto err_vs;
|
||||
if (!proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
|
||||
ip_vs_stats_show, NULL))
|
||||
goto err_stats;
|
||||
if (!proc_create_net_single("ip_vs_stats_percpu", 0,
|
||||
ipvs->net->proc_net,
|
||||
ip_vs_stats_percpu_show, NULL))
|
||||
goto err_percpu;
|
||||
#endif
|
||||
|
||||
if (ip_vs_control_net_init_sysctl(ipvs))
|
||||
goto err;
|
||||
@ -4180,6 +4186,17 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
|
||||
return 0;
|
||||
|
||||
err:
|
||||
#ifdef CONFIG_PROC_FS
|
||||
remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
|
||||
|
||||
err_percpu:
|
||||
remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
|
||||
|
||||
err_stats:
|
||||
remove_proc_entry("ip_vs", ipvs->net->proc_net);
|
||||
|
||||
err_vs:
|
||||
#endif
|
||||
free_percpu(ipvs->tot_stats.cpustats);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -4188,9 +4205,11 @@ void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
|
||||
{
|
||||
ip_vs_trash_cleanup(ipvs);
|
||||
ip_vs_control_net_cleanup_sysctl(ipvs);
|
||||
#ifdef CONFIG_PROC_FS
|
||||
remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
|
||||
remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
|
||||
remove_proc_entry("ip_vs", ipvs->net->proc_net);
|
||||
#endif
|
||||
free_percpu(ipvs->tot_stats.cpustats);
|
||||
}
|
||||
|
||||
|
@ -619,7 +619,8 @@ static int nft_request_module(struct net *net, const char *fmt, ...)
|
||||
static void lockdep_nfnl_nft_mutex_not_held(void)
|
||||
{
|
||||
#ifdef CONFIG_PROVE_LOCKING
|
||||
WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
|
||||
if (debug_locks)
|
||||
WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -28,6 +28,23 @@ static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
|
||||
return flow;
|
||||
}
|
||||
|
||||
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
|
||||
enum flow_dissector_key_id addr_type)
|
||||
{
|
||||
struct nft_flow_match *match = &flow->match;
|
||||
struct nft_flow_key *mask = &match->mask;
|
||||
struct nft_flow_key *key = &match->key;
|
||||
|
||||
if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
|
||||
return;
|
||||
|
||||
key->control.addr_type = addr_type;
|
||||
mask->control.addr_type = 0xffff;
|
||||
match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
|
||||
match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
|
||||
offsetof(struct nft_flow_key, control);
|
||||
}
|
||||
|
||||
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
|
||||
const struct nft_rule *rule)
|
||||
{
|
||||
|
@ -123,11 +123,11 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
|
||||
u8 *mask = (u8 *)&flow->match.mask;
|
||||
u8 *key = (u8 *)&flow->match.key;
|
||||
|
||||
if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
|
||||
if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
memcpy(key + reg->offset, &priv->data, priv->len);
|
||||
memcpy(mask + reg->offset, ®->mask, priv->len);
|
||||
memcpy(key + reg->offset, &priv->data, reg->len);
|
||||
memcpy(mask + reg->offset, ®->mask, reg->len);
|
||||
|
||||
flow->match.dissector.used_keys |= BIT(reg->key);
|
||||
flow->match.dissector.offset[reg->key] = reg->base_offset;
|
||||
@ -137,7 +137,7 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
|
||||
nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
nft_offload_update_dependency(ctx, &priv->data, priv->len);
|
||||
nft_offload_update_dependency(ctx, &priv->data, reg->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -724,22 +724,22 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx,

 	switch (priv->key) {
 	case NFT_META_PROTOCOL:
-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
-				  sizeof(__u16), reg);
+		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
+					sizeof(__u16), reg);
 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
 		break;
 	case NFT_META_L4PROTO:
-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
-				  sizeof(__u8), reg);
+		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
+					sizeof(__u8), reg);
 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
 		break;
 	case NFT_META_IIF:
-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
-				  ingress_ifindex, sizeof(__u32), reg);
+		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
+					ingress_ifindex, sizeof(__u32), reg);
 		break;
 	case NFT_META_IIFTYPE:
-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
-				  ingress_iftype, sizeof(__u16), reg);
+		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
+					ingress_iftype, sizeof(__u16), reg);
 		break;
 	default:
 		return -EOPNOTSUPP;

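NFT_OFFLOAD_MATCH only fills in the register's offsets, length and key; until this series the mask was implicitly all-ones for everything. With masks now derived from the number of compared bytes, match sites that must remain exact, full-width matches, like the meta keys above, move to a new _EXACT variant that also sets the all-ones mask explicitly. A hedged sketch of how such a macro pair can be defined (the authoritative definitions live in the kernel's nf_tables_offload header, and struct nft_flow_key / the register type are the kernel's):

#include <stddef.h>	/* offsetof */
#include <string.h>	/* memset */

#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
	do {								\
		(__reg)->base_offset = offsetof(struct nft_flow_key, __base); \
		(__reg)->offset = offsetof(struct nft_flow_key, __base.__field); \
		(__reg)->len = (__len);					\
		(__reg)->key = (__key);					\
	} while (0)

#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)	\
	do {								\
		NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg); \
		memset(&(__reg)->mask, 0xff, (__reg)->len);		\
	} while (0)
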
@@ -165,6 +165,34 @@ static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	return -1;
 }

+static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
+				     u32 priv_len, u32 field_len)
+{
+	unsigned int remainder, delta, k;
+	struct nft_data mask = {};
+	__be32 remainder_mask;
+
+	if (priv_len == field_len) {
+		memset(&reg->mask, 0xff, priv_len);
+		return true;
+	} else if (priv_len > field_len) {
+		return false;
+	}
+
+	memset(&mask, 0xff, field_len);
+
+	remainder = priv_len % sizeof(u32);
+	if (remainder) {
+		k = priv_len / sizeof(u32);
+		delta = field_len - priv_len;
+		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
+		mask.data[k] = (__force u32)remainder_mask;
+	}
+
+	memcpy(&reg->mask, &mask, field_len);
+
+	return true;
+}
+
 static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 				  struct nft_flow_rule *flow,
 				  const struct nft_payload *priv)

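The helper distinguishes three cases: priv_len == field_len is an exact match and gets an all-ones mask; priv_len > field_len cannot be offloaded; and a shorter compare starts from an all-ones mask over the field, then rewrites the u32 word containing the cut-off so that only the compared bytes stay set. Here is the arithmetic re-run standalone for a 3-byte compare against a 4-byte field, roughly what matching an IPv4 /24 prefix produces:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl */

#define BITS_PER_BYTE 8

/* Expected register mask for priv_len = 3, field_len = 4: ff ff ff 00. */
int main(void)
{
	uint32_t mask[1];
	uint8_t out[4];
	unsigned int priv_len = 3, field_len = 4;
	unsigned int remainder, delta, k;

	memset(mask, 0, sizeof(mask));
	memset(mask, 0xff, field_len);		/* all-ones over the field */

	remainder = priv_len % sizeof(uint32_t);	/* 3 */
	if (remainder) {
		k = priv_len / sizeof(uint32_t);	/* word 0 */
		delta = field_len - priv_len;		/* 1 trailing byte */
		mask[k] = htonl(~((1u << (delta * BITS_PER_BYTE)) - 1));
	}

	memcpy(out, mask, field_len);
	printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
	return 0;
}
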
@@ -173,21 +201,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,

 	switch (priv->offset) {
 	case offsetof(struct ethhdr, h_source):
-		if (priv->len != ETH_ALEN)
+		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
 				  src, ETH_ALEN, reg);
 		break;
 	case offsetof(struct ethhdr, h_dest):
-		if (priv->len != ETH_ALEN)
+		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
 				  dst, ETH_ALEN, reg);
 		break;
 	case offsetof(struct ethhdr, h_proto):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,

@@ -195,14 +223,14 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
 		break;
 	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
 				  vlan_tci, sizeof(__be16), reg);
 		break;
 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,

@@ -210,7 +238,7 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
 		break;
 	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,

@@ -218,7 +246,7 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 		break;
 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
 							sizeof(struct vlan_hdr):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,

@@ -239,21 +267,25 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,

 	switch (priv->offset) {
 	case offsetof(struct iphdr, saddr):
-		if (priv->len != sizeof(struct in_addr))
+		if (!nft_payload_offload_mask(reg, priv->len,
+					      sizeof(struct in_addr)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
 				  sizeof(struct in_addr), reg);
+		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
 		break;
 	case offsetof(struct iphdr, daddr):
-		if (priv->len != sizeof(struct in_addr))
+		if (!nft_payload_offload_mask(reg, priv->len,
+					      sizeof(struct in_addr)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
 				  sizeof(struct in_addr), reg);
+		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
 		break;
 	case offsetof(struct iphdr, protocol):
-		if (priv->len != sizeof(__u8))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,

@@ -275,21 +307,25 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,

 	switch (priv->offset) {
 	case offsetof(struct ipv6hdr, saddr):
-		if (priv->len != sizeof(struct in6_addr))
+		if (!nft_payload_offload_mask(reg, priv->len,
+					      sizeof(struct in6_addr)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
 				  sizeof(struct in6_addr), reg);
+		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
 		break;
 	case offsetof(struct ipv6hdr, daddr):
-		if (priv->len != sizeof(struct in6_addr))
+		if (!nft_payload_offload_mask(reg, priv->len,
+					      sizeof(struct in6_addr)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
 				  sizeof(struct in6_addr), reg);
+		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
 		break;
 	case offsetof(struct ipv6hdr, nexthdr):
-		if (priv->len != sizeof(__u8))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,

@@ -331,14 +367,14 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,

 	switch (priv->offset) {
 	case offsetof(struct tcphdr, source):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
 				  sizeof(__be16), reg);
 		break;
 	case offsetof(struct tcphdr, dest):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,

@@ -359,14 +395,14 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,

 	switch (priv->offset) {
 	case offsetof(struct udphdr, source):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
 				  sizeof(__be16), reg);
 		break;
 	case offsetof(struct udphdr, dest):
-		if (priv->len != sizeof(__be16))
+		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;

 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,

@@ -199,6 +199,9 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	__be32 lse;
 	int err;

+	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
+		return -ENOMEM;
+
 	stack = mpls_hdr(skb);
 	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
 	err = skb_mpls_update_lse(skb, lse);

@@ -105,6 +105,9 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
 			goto drop;
 		break;
 	case TCA_MPLS_ACT_MODIFY:
+		if (!pskb_may_pull(skb,
+				   skb_network_offset(skb) + MPLS_HLEN))
+			goto drop;
 		new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
 		if (skb_mpls_update_lse(skb, new_lse))
 			goto drop;

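Both MPLS hunks, the openvswitch set_mpls() above and act_mpls here, fix the same bug: mpls_hdr() is a plain pointer computation into skb->data, so on an skb whose label stack entry still sits in a non-linear fragment, reading or rewriting the LSE touches memory that is not there. The guard pulls the header into the linear area first and fails the action if that is impossible. The reusable pattern, sketched outside either call site (update_lse_safe is a hypothetical name):

#include <linux/skbuff.h>
#include <net/mpls.h>

/* Never dereference mpls_hdr() before guaranteeing that MPLS_HLEN bytes
 * past the network header are linear. */
static int update_lse_safe(struct sk_buff *skb, __be32 new_lse)
{
	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;		/* fragment could not be pulled */

	return skb_mpls_update_lse(skb, new_lse);	/* header now linear */
}
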
@@ -2182,6 +2182,8 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
 			else if (prop == TIPC_NLA_PROP_MTU)
 				tipc_link_set_mtu(e->link, b->mtu);
 		}
+		/* Update MTU for node link entry */
+		e->mtu = tipc_link_mss(e->link);
 		tipc_node_write_unlock(n);
 		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
 	}

@@ -681,7 +681,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	int len, i, rc = 0;

 	if (addr_len != sizeof(struct sockaddr_x25) ||
-	    addr->sx25_family != AF_X25) {
+	    addr->sx25_family != AF_X25 ||
+	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
 		rc = -EINVAL;
 		goto out;
 	}

@@ -775,7 +776,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,

 	rc = -EINVAL;
 	if (addr_len != sizeof(struct sockaddr_x25) ||
-	    addr->sx25_family != AF_X25)
+	    addr->sx25_family != AF_X25 ||
+	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
 		goto out;

 	rc = -ENETUNREACH;

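The x25_addr field is a fixed X25_ADDR_LEN byte buffer that the rest of the X.25 code treats as a NUL-terminated string (strcpy into equally sized buffers, strlen for signalling). User space can fill all of it with non-NUL bytes, and before this change bind() and connect() accepted that, letting later string operations run past the buffer. strnlen(...) == X25_ADDR_LEN is exactly the "no terminator" test. A userspace re-creation of the check:

#include <stdio.h>
#include <string.h>

#define X25_ADDR_LEN 16		/* matches the kernel's value */

/* An address that fills the whole buffer has no NUL terminator, so
 * strnlen() hits the cap and validation fails before any strcpy(). */
static int x25_addr_ok(const char addr[X25_ADDR_LEN])
{
	return strnlen(addr, X25_ADDR_LEN) < X25_ADDR_LEN;
}

int main(void)
{
	char good[X25_ADDR_LEN] = "12345";	/* terminated */
	char bad[X25_ADDR_LEN];

	memset(bad, 'A', sizeof(bad));		/* 16 bytes, no NUL */
	printf("good: %d, bad: %d\n", x25_addr_ok(good), x25_addr_ok(bad));
	return 0;
}
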
@@ -66,18 +66,31 @@ static void xdp_umem_release(struct xdp_umem *umem)
 	kfree(umem);
 }

+static void xdp_umem_release_deferred(struct work_struct *work)
+{
+	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
+
+	xdp_umem_release(umem);
+}
+
 void xdp_get_umem(struct xdp_umem *umem)
 {
 	refcount_inc(&umem->users);
 }

-void xdp_put_umem(struct xdp_umem *umem)
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
 {
 	if (!umem)
 		return;

-	if (refcount_dec_and_test(&umem->users))
-		xdp_umem_release(umem);
+	if (refcount_dec_and_test(&umem->users)) {
+		if (defer_cleanup) {
+			INIT_WORK(&umem->work, xdp_umem_release_deferred);
+			schedule_work(&umem->work);
+		} else {
+			xdp_umem_release(umem);
+		}
+	}
 }

 static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)

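xdp_umem_release() unpins user pages and returns accounting, work that can sleep, yet after the pool/umem split the final put can come from the socket destructor, where sleeping is not allowed. Rather than making every caller schedule its own work, the put now takes a defer_cleanup flag and queues the release on the system workqueue when asked. The shape of the idiom, reduced to its parts (obj/obj_put are illustrative names, not kernel source):

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	refcount_t users;
	struct work_struct work;	/* embedded so the worker can find us */
};

static void obj_release(struct obj *o)
{
	/* real teardown goes here: may sleep (e.g. unpinning pages) */
	kfree(o);
}

static void obj_release_deferred(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	obj_release(o);	/* workqueue context: sleeping is fine */
}

static void obj_put(struct obj *o, bool defer_cleanup)
{
	if (!refcount_dec_and_test(&o->users))
		return;

	if (defer_cleanup) {
		INIT_WORK(&o->work, obj_release_deferred);
		schedule_work(&o->work);	/* safe from atomic context */
	} else {
		obj_release(o);
	}
}
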
@@ -9,7 +9,7 @@
 #include <net/xdp_sock_drv.h>

 void xdp_get_umem(struct xdp_umem *umem);
-void xdp_put_umem(struct xdp_umem *umem);
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);

 #endif /* XDP_UMEM_H_ */

@@ -411,11 +411,7 @@ static int xsk_generic_xmit(struct sock *sk)
 		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
 		skb->destructor = xsk_destruct_skb;

-		/* Hinder dev_direct_xmit from freeing the packet and
-		 * therefore completing it in the destructor
-		 */
-		refcount_inc(&skb->users);
-		err = dev_direct_xmit(skb, xs->queue_id);
+		err = __dev_direct_xmit(skb, xs->queue_id);
 		if (err == NETDEV_TX_BUSY) {
 			/* Tell user-space to retry the send */
 			skb->destructor = sock_wfree;

@@ -429,12 +425,10 @@ static int xsk_generic_xmit(struct sock *sk)
 		/* Ignore NET_XMIT_CN as packet might have been sent */
 		if (err == NET_XMIT_DROP) {
 			/* SKB completed but not sent */
-			kfree_skb(skb);
 			err = -EBUSY;
 			goto out;
 		}

-		consume_skb(skb);
 		sent_frame = true;
 	}

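dev_direct_xmit() consumes the skb even on failure, so the old code bumped skb->users beforehand and then dropped its extra reference with kfree_skb()/consume_skb() after the call; juggling that second reference per packet is what the "avoid taking multiple skbuff references" fix in this pull removes. The new __dev_direct_xmit() variant never frees the skb itself, so the socket holds a single reference throughout and frees only on the paths where it still owns the packet. Sketch of the reference flow:

/* before:	refcount_inc(&skb->users);	   keep skb alive across xmit
 *		dev_direct_xmit(skb, qid);	   may free the skb internally
 *		kfree_skb(skb);			   error paths: drop extra ref
 *		consume_skb(skb);		   success path: drop extra ref
 *
 * after:	__dev_direct_xmit(skb, qid);	   never frees the skb; one
 *						   reference, freed only where
 *						   the caller still owns it
 */
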
@@ -1147,7 +1141,7 @@ static void xsk_destruct(struct sock *sk)
 		return;

 	if (!xp_put_pool(xs->pool))
-		xdp_put_umem(xs->umem);
+		xdp_put_umem(xs->umem, !xs->pool);

 	sk_refcnt_debug_dec(sk);
 }

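The boolean passed here does real work, and this is the core of the "xsk: Fix umem cleanup from wrong context in socket destruct" entry. xp_put_pool() returns false when there is no pool at all, so a socket that never acquired a buffer pool drops the umem reference from its own destructor, a context that must not sleep; !xs->pool is then true and the release is deferred. When a pool exists, the pool owns umem teardown: its last put schedules xp_release_deferred(), which already runs on a workqueue and may release inline (next hunks). Spelled out as a sketch:

/* Condensed decision logic (sketch, not extra kernel code): */
if (!xp_put_pool(xs->pool)) {
	/* No pool ever existed: the destructor itself drops the last umem
	 * reference and may be running in atomic context, so it defers
	 * (!xs->pool is true here). */
	xdp_put_umem(xs->umem, !xs->pool);
}
/* Otherwise the pool's deferred work, already in sleepable workqueue
 * context, calls xdp_put_umem(pool->umem, false); see below. */
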
@@ -185,8 +185,10 @@ static int __xp_assign_dev(struct xsk_buff_pool *pool,
 err_unreg_pool:
 	if (!force_zc)
 		err = 0; /* fallback to copy mode */
-	if (err)
+	if (err) {
 		xsk_clear_pool_at_qid(netdev, queue_id);
+		dev_put(netdev);
+	}
 	return err;
 }

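__xp_assign_dev() takes a reference on the netdev earlier in the function (dev_hold(), not visible in this hunk) to pin the device while the pool is bound to a queue. The old unwind cleared the queue-id slot but never returned that reference, so a failed bind leaked a netdev refcount and could keep the device from unregistering. The fix makes the error path mirror the setup path:

/* Acquire/release pairing this hunk restores (sketch):
 *
 *   setup:    dev_hold(netdev);                        // pin the device
 *             xsk_reg_pool_at_qid(netdev, pool, queue_id);
 *
 *   unwind:   if (err) {
 *                     xsk_clear_pool_at_qid(netdev, queue_id);
 *                     dev_put(netdev);                 // previously missing
 *             }
 */
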
@@ -242,7 +244,7 @@ static void xp_release_deferred(struct work_struct *work)
 		pool->cq = NULL;
 	}

-	xdp_put_umem(pool->umem);
+	xdp_put_umem(pool->umem, false);
 	xp_destroy(pool);
 }

@@ -693,6 +693,7 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
 	obj_node = calloc(1, sizeof(*obj_node));
 	if (!obj_node) {
 		p_err("failed to allocate memory: %s", strerror(errno));
+		err = -ENOMEM;
 		goto err_free;
 	}

@@ -59,6 +59,7 @@ CONFIG_NET_IFE_SKBPRIO=m
 CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_SCH_ETS=m
+CONFIG_NET_SCH_RED=m

 #
 ## Network testing