From d518d2ed8640c1cbbbb6f63939e3e65471817367 Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Thu, 12 Sep 2019 12:02:42 +0200
Subject: [PATCH 01/11] net/sched: fix race between deactivation and dequeue
 for NOLOCK qdisc

The test implemented by some_qdisc_is_busy() is somewhat loose for
NOLOCK qdiscs, as we may hit the following scenario:

CPU1                                        CPU2
// in net_tx_action()
clear_bit(__QDISC_STATE_SCHED...);
                                            // in some_qdisc_is_busy()
                                            val = (qdisc_is_running(q) ||
                                                   test_bit(__QDISC_STATE_SCHED,
                                                            &q->state));
                                            // here val is 0 but...
qdisc_run(q)
// ... CPU1 is going to run the qdisc next

As a consequence, qdisc_run() in net_tx_action() can race with
qdisc_reset() in dev_qdisc_reset(). Such a race is not possible for
!NOLOCK qdiscs, as both of the above bit operations are performed
under the root qdisc lock.

After commit 021a17ed796b ("pfifo_fast: drop unneeded additional lock
on dequeue") the race can cause a use after free and/or a NULL pointer
dereference, but the root cause is likely older.

This patch addresses the issue by explicitly checking for deactivation
under the seqlock for NOLOCK qdiscs, so that qdisc_run() in the
critical scenario becomes a no-op.

Note that the enqueue() op can still execute concurrently with
dev_qdisc_reset(), but that is safe due to the skb_array() locking,
and we can't avoid that for NOLOCK qdiscs.

Fixes: 021a17ed796b ("pfifo_fast: drop unneeded additional lock on dequeue")
Reported-by: Li Shuang
Reported-and-tested-by: Davide Caratti
Signed-off-by: Paolo Abeni
Signed-off-by: David S. Miller
---
 include/net/pkt_sched.h |  7 ++++++-
 net/core/dev.c          | 16 ++++++++++------
 2 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index a16fbe9a2a67..aa99c73c3fbd 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q);
 static inline void qdisc_run(struct Qdisc *q)
 {
 	if (qdisc_run_begin(q)) {
-		__qdisc_run(q);
+		/* NOLOCK qdisc must check 'state' under the qdisc seqlock
+		 * to avoid racing with dev_qdisc_reset()
+		 */
+		if (!(q->flags & TCQ_F_NOLOCK) ||
+		    likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			__qdisc_run(q);
 		qdisc_run_end(q);
 	}
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 5156c0edebe8..4ed9df74eb8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3467,18 +3467,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	qdisc_calculate_pkt_len(skb, q);
 
 	if (q->flags & TCQ_F_NOLOCK) {
-		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-			__qdisc_drop(skb, &to_free);
-			rc = NET_XMIT_DROP;
-		} else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
-			   qdisc_run_begin(q)) {
+		if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+		    qdisc_run_begin(q)) {
+			if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state))) {
+				__qdisc_drop(skb, &to_free);
+				rc = NET_XMIT_DROP;
+				goto end_run;
+			}
 			qdisc_bstats_cpu_update(q, skb);
 
+			rc = NET_XMIT_SUCCESS;
 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
 				__qdisc_run(q);
 
+end_run:
 			qdisc_run_end(q);
-			rc = NET_XMIT_SUCCESS;
 		} else {
 			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 			qdisc_run(q);
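The fix above follows a general pattern worth noting: a flag that gates
execution must be re-tested after the run "lock" is won, or a concurrent
reset can slip in between the test and the run. A minimal user-space
sketch of the same pattern (plain pthreads and illustrative names, not
the kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t run_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for the qdisc seqlock */
    static atomic_bool deactivated;             /* stands in for __QDISC_STATE_DEACTIVATED */

    static void do_work(void) { puts("dequeue loop runs"); }

    /* Analogue of the fixed qdisc_run(): the flag is re-checked *inside*
     * the critical section, so a reset that sets it while we were still
     * acquiring the lock turns this call into a no-op. */
    static void run(void)
    {
            if (pthread_mutex_trylock(&run_lock) == 0) {
                    if (!atomic_load(&deactivated))
                            do_work();
                    pthread_mutex_unlock(&run_lock);
            }
    }

    /* Analogue of dev_qdisc_reset(): set the flag, then take the lock to
     * wait out any runner that won the race; once it returns, no new
     * work can start and teardown is safe. */
    static void deactivate_and_reset(void)
    {
            atomic_store(&deactivated, true);
            pthread_mutex_lock(&run_lock);
            /* safe to tear down state here */
            pthread_mutex_unlock(&run_lock);
    }

    int main(void)
    {
            run();
            deactivate_and_reset();
            run();  /* now a no-op */
            return 0;
    }

Checking the flag before taking the lock, as the old __dev_xmit_skb()
did, leaves exactly the window the commit's CPU1/CPU2 diagram shows.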
From 23426a25e55a417dc104df08781b6eff95e65f3f Mon Sep 17 00:00:00 2001
From: Andrew Lunn
Date: Thu, 12 Sep 2019 15:16:45 +0200
Subject: [PATCH 02/11] net: dsa: Fix load order between DSA drivers and
 taggers

The DSA core, DSA taggers and DSA drivers all make use of
module_init(). Hence they get initialised at device_initcall() time.
The ordering is non-deterministic. It can happen that a DSA driver is
bound to a device before the needed tag driver has been initialised,
resulting in the message:

  No tagger for this switch

Rather than have this be fatal, return -EPROBE_DEFER so that it is
tried again later once all the needed drivers have been loaded.

Fixes: d3b8c04988ca ("dsa: Add boilerplate helper to register DSA tag driver modules")
Signed-off-by: Andrew Lunn
Signed-off-by: David S. Miller
---
 net/dsa/dsa2.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 3abd173ebacb..96f787cf9b6e 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -623,6 +623,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
 	tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
 	tag_ops = dsa_tag_driver_get(tag_protocol);
 	if (IS_ERR(tag_ops)) {
+		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+			return -EPROBE_DEFER;
 		dev_warn(ds->dev, "No tagger for this switch\n");
 		return PTR_ERR(tag_ops);
 	}
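Translating a specific "dependency not loaded yet" error into
-EPROBE_DEFER is a common kernel idiom: the driver core re-queues the
probe instead of failing the bind permanently. A condensed sketch of
the shape of such a probe path (the helper names are illustrative, not
the DSA API, and EPROBE_DEFER is re-declared here because it is a
kernel-internal errno):

    #include <errno.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517  /* kernel-internal errno, re-declared for this sketch */

    /* Illustrative stand-in for dsa_tag_driver_get(): reports -ENOPROTOOPT
     * while the needed tagger module has not registered itself yet. */
    static int get_tagger(void **tagger)
    {
            *tagger = NULL;
            return -ENOPROTOOPT;
    }

    static int probe(void)
    {
            void *tagger;
            int err = get_tagger(&tagger);

            if (err) {
                    /* "not ready yet" is recoverable: ask to be re-probed
                     * later instead of failing the bind for good */
                    if (err == -ENOPROTOOPT)
                            return -EPROBE_DEFER;
                    fprintf(stderr, "No tagger for this switch\n");
                    return err;
            }
            return 0;
    }

    int main(void)
    {
            if (probe() == -EPROBE_DEFER)
                    puts("probe deferred; retried after more modules load");
            return 0;
    }

The key design point is that only the specific "missing dependency"
error is mapped to a deferral; genuine failures still propagate.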
From 6efb971ba8edfbd80b666f29de12882852f095ae Mon Sep 17 00:00:00 2001
From: Cong Wang
Date: Thu, 12 Sep 2019 10:22:30 -0700
Subject: [PATCH 03/11] net_sched: let qdisc_put() accept NULL pointer

When tcf_block_get() fails in sfb_init(), q->qdisc is still a NULL
pointer, which leads to a crash in sfb_destroy(). The same applies to
sch_dsmark. Instead of fixing each call site separately, Linus
suggested just accepting a NULL pointer in qdisc_put(), which makes
things easier for the callers.

(For sch_dsmark, the bug probably exists long before commit
6529eaba33f0.)

Fixes: 6529eaba33f0 ("net: sched: introduce tcf block infractructure")
Reported-by: syzbot+d5870a903591faaca4ae@syzkaller.appspotmail.com
Suggested-by: Linus Torvalds
Cc: Jamal Hadi Salim
Cc: Jiri Pirko
Signed-off-by: Cong Wang
Acked-by: Jiri Pirko
Signed-off-by: David S. Miller
---
 net/sched/sch_generic.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ac28f6a5d70e..17bd8f539bc7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc)
 
 void qdisc_put(struct Qdisc *qdisc)
 {
+	if (!qdisc)
+		return;
+
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
 		return;
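Accepting NULL in a release function mirrors kfree(NULL) and free(NULL):
partially-initialised objects can then be torn down unconditionally,
with no "was it ever allocated?" test at every call site. A small
user-space sketch of the same refcounted-put shape (the names only echo
the patch; this is not kernel code):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
            atomic_int refcnt;
            /* payload ... */
    };

    /* Like the patched qdisc_put(): tolerate NULL so error paths such as
     * sfb_destroy() can call this without checking first. */
    static void obj_put(struct obj *o)
    {
            if (!o)
                    return;
            if (atomic_fetch_sub(&o->refcnt, 1) == 1)  /* dropped last ref */
                    free(o);
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (!o)
                    return 1;
            atomic_init(&o->refcnt, 1);

            obj_put(NULL);  /* no-op, like kfree(NULL) */
            obj_put(o);     /* drops the last reference and frees */
            return 0;
    }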
From 05a82481a3024b94db00b8c816bb3d526b5209e0 Mon Sep 17 00:00:00 2001
From: Gerd Rausch
Date: Thu, 12 Sep 2019 13:49:41 -0700
Subject: [PATCH 04/11] net/rds: Fix 'ib_evt_handler_call' element in
 'rds_ib_stat_names'

All entries in 'rds_ib_stat_names' are stringified versions of the
corresponding "struct rds_ib_statistics" element, without the
"s_"-prefix.

Fix entry 'ib_evt_handler_call' to do the same.

Fixes: f4f943c958a2 ("RDS: IB: ack more receive completions to improve performance")
Signed-off-by: Gerd Rausch
Acked-by: Santosh Shilimkar
Signed-off-by: David S. Miller
---
 net/rds/ib_stats.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 9252ad126335..ac46d8961b61 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -42,7 +42,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
 static const char *const rds_ib_stat_names[] = {
 	"ib_connect_raced",
 	"ib_listen_closed_stale",
-	"s_ib_evt_handler_call",
+	"ib_evt_handler_call",
 	"ib_tasklet_call",
 	"ib_tx_cq_event",
 	"ib_tx_ring_full",
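The bug here was a hand-maintained string table drifting out of sync
with its struct. One common way to rule out that whole class of bug (a
generic C sketch of the X-macro technique, not what net/rds actually
does) is to generate both the struct fields and the exported names from
a single list:

    #include <stddef.h>
    #include <stdio.h>

    /* Single source of truth: each stat is listed exactly once. */
    #define STAT_LIST(X)               \
            X(connect_raced)           \
            X(listen_closed_stale)     \
            X(evt_handler_call)        \
            X(tasklet_call)

    /* Struct fields carry the s_ prefix... */
    struct stats {
    #define X(name) unsigned long s_##name;
            STAT_LIST(X)
    #undef X
    };

    /* ...while the names are generated without it, so the two can never
     * disagree the way "s_ib_evt_handler_call" did. */
    static const char *const stat_names[] = {
    #define X(name) #name,
            STAT_LIST(X)
    #undef X
    };

    int main(void)
    {
            for (size_t i = 0; i < sizeof(stat_names) / sizeof(stat_names[0]); i++)
                    printf("%s\n", stat_names[i]);
            return 0;
    }

Adding a stat then means touching one line, and a stray prefix in the
string table becomes impossible by construction.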
From acdcecc61285faed359f1a3568c32089cc3a8329 Mon Sep 17 00:00:00 2001
From: Willem de Bruijn
Date: Thu, 12 Sep 2019 21:16:39 -0400
Subject: [PATCH 05/11] udp: correct reuseport selection with connected
 sockets

UDP reuseport groups can hold a mix of unconnected and connected
sockets. Ensure that connections only receive traffic to their
4-tuple.

Fast reuseport returns on the first reuseport match on the assumption
that all matches are equal. Only if connections are present does it
return to the previous behavior of scoring all sockets.

Record if connections are present and if so (1) treat such connected
sockets as an independent match from the group, (2) only return
2-tuple matches from reuseport and (3) do not return on the first
2-tuple reuseport match to allow for a higher scoring match later.

The new field has_conns is set without locks. No other fields in the
bitmap are modified at runtime and the field is only ever set
unconditionally, so an RMW cannot miss a change.

Fixes: e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
Link: http://lkml.kernel.org/r/CA+FuTSfRP09aJNYRt04SS6qj22ViiOEWaWmLAwX0psk8-PGNxw@mail.gmail.com
Signed-off-by: Willem de Bruijn
Acked-by: Paolo Abeni
Acked-by: Craig Gallek
Signed-off-by: Willem de Bruijn
Signed-off-by: David S. Miller
---
 include/net/sock_reuseport.h | 20 +++++++++++++++++++-
 net/core/sock_reuseport.c    | 15 +++++++++++++--
 net/ipv4/datagram.c          |  2 ++
 net/ipv4/udp.c               |  5 +++--
 net/ipv6/datagram.c          |  2 ++
 net/ipv6/udp.c               |  5 +++--
 6 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index d9112de85261..43f4a818d88f 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -21,7 +21,8 @@ struct sock_reuseport {
 	unsigned int		synq_overflow_ts;
 	/* ID stays the same even after the size of socks[] grows. */
 	unsigned int		reuseport_id;
-	bool			bind_inany;
+	unsigned int		bind_inany:1;
+	unsigned int		has_conns:1;
 	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
 	struct sock		*socks[0];	/* array of sock pointers */
 };
@@ -37,6 +38,23 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
 extern int reuseport_detach_prog(struct sock *sk);
 
+static inline bool reuseport_has_conns(struct sock *sk, bool set)
+{
+	struct sock_reuseport *reuse;
+	bool ret = false;
+
+	rcu_read_lock();
+	reuse = rcu_dereference(sk->sk_reuseport_cb);
+	if (reuse) {
+		if (set)
+			reuse->has_conns = 1;
+		ret = reuse->has_conns;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 int reuseport_get_id(struct sock_reuseport *reuse);
 
 #endif  /* _SOCK_REUSEPORT_H */
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 9408f9264d05..f3ceec93f392 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 select_by_hash:
 		/* no bpf or invalid bpf result: fall back to hash usage */
-		if (!sk2)
-			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
+		if (!sk2) {
+			int i, j;
+
+			i = j = reciprocal_scale(hash, socks);
+			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+				i++;
+				if (i >= reuse->num_socks)
+					i = 0;
+				if (i == j)
+					goto out;
+			}
+			sk2 = reuse->socks[i];
+		}
 	}
 
 out:
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 7bd29e694603..9a0fe0c2fa02 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <net/sock_reuseport.h>
 
 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	}
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 	inet->inet_id = jiffies;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d88821c794fb..16486c8b708b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -423,12 +423,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			badness = score;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9ab897ded4df..96f939248d2f 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <net/sock_reuseport.h>
 #include
 #include
 
@@ -254,6 +255,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
 		goto out;
 	}
 
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 827fe7385078..5995fdc99d3f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -158,13 +158,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			result = sk;
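The new fallback in reuseport_select_sock() is a bounded circular scan:
start at the hashed slot and walk forward, skipping connected
(TCP_ESTABLISHED) sockets, giving up after one full lap. The loop logic
in isolation (a sketch with a plain bool array standing in for the
socket group; not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_SOCKS 4

    /* true = connected: must not receive wildcard (2-tuple) traffic */
    static const bool connected[NUM_SOCKS] = { false, true, true, false };

    /* Pick an unconnected socket starting from the hashed index,
     * wrapping around once; -1 if every socket in the group is
     * connected (the kernel's "goto out" case). */
    static int select_sock(unsigned int hash)
    {
            int i, j;

            i = j = hash % NUM_SOCKS;
            while (connected[i]) {
                    i = (i + 1) % NUM_SOCKS;
                    if (i == j)
                            return -1;  /* full lap, no 2-tuple match */
            }
            return i;
    }

    int main(void)
    {
            printf("hash 1 -> sock %d\n", select_sock(1)); /* 1, 2 connected: lands on 3 */
            printf("hash 0 -> sock %d\n", select_sock(0)); /* 0 is already unconnected */
            return 0;
    }

The full-lap termination test against the starting index j is what
keeps the scan from spinning forever when the group is all-connected.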
From 28e486037747c2180470b77c290d4090ad42f259 Mon Sep 17 00:00:00 2001
From: Xin Long
Date: Fri, 13 Sep 2019 17:45:47 +0800
Subject: [PATCH 06/11] ip6_gre: fix a dst leak in ip6erspan_tunnel_xmit

In ip6erspan_tunnel_xmit(), if the skb will not be sent out, it has to
be freed on the tx_err path. Otherwise, when deleting a netns, it
would cause a dst/dev leak, and dmesg shows:

  unregister_netdevice: waiting for lo to become free. Usage count = 1

Fixes: ef7baf5e083c ("ip6_gre: add ip6 erspan collect_md mode")
Signed-off-by: Xin Long
Acked-by: William Tu
Signed-off-by: David S. Miller
---
 net/ipv6/ip6_gre.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index dd2d0b963260..d5779d6a6065 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	if (unlikely(!tun_info ||
 		     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 		     ip_tunnel_info_af(tun_info) != AF_INET6))
-		return -EINVAL;
+		goto tx_err;
 
 	key = &tun_info->key;
 	memset(&fl6, 0, sizeof(fl6));
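The one-line change works because ip6erspan_tunnel_xmit() already has a
tx_err label that frees the skb; the early return simply bypassed it
and leaked the buffer and its dst reference. The underlying idiom,
centralizing cleanup behind a label so no failure path can skip it,
looks like this in isolation (a generic sketch, not the tunnel code):

    #include <stdio.h>
    #include <stdlib.h>

    static int transmit(int bad_header)
    {
            char *pkt = malloc(64);

            if (!pkt)
                    return -1;

            /* Validation failures must not just 'return': the packet has
             * an owner now, and skipping the label would leak it, the
             * same way 'return -EINVAL' leaked the skb's dst. */
            if (bad_header)
                    goto tx_err;

            printf("sent packet\n");
            free(pkt);
            return 0;

    tx_err:
            free(pkt);      /* analogue of kfree_skb() on the error path */
            return -1;      /* analogue of bumping tx_errors and returning */
    }

    int main(void)
    {
            transmit(0);
            transmit(1);    /* exercises the error path; no leak */
            return 0;
    }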
From 19e13cb27b998ff49f07e399b5871bfe5ba7e3f0 Mon Sep 17 00:00:00 2001
From: Jose Abreu
Date: Fri, 13 Sep 2019 11:50:32 +0200
Subject: [PATCH 07/11] net: stmmac: Hold rtnl lock in suspend/resume
 callbacks

We need to hold the rtnl lock in the suspend and resume callbacks
because phylink requires it. Otherwise we will get a WARN() in suspend
and resume.

Also, move the phylink start and stop callbacks inside the device's
internal lock so that we prevent concurrent HW accesses.

Fixes: 74371272f97f ("net: stmmac: Convert to phylink and remove phylib logic")
Reported-by: Christophe ROULLIER
Tested-by: Christophe ROULLIER
Signed-off-by: Jose Abreu
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd54c7c87485..b19ab09cb18f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4451,10 +4451,12 @@ int stmmac_suspend(struct device *dev)
 	if (!ndev || !netif_running(ndev))
 		return 0;
 
-	phylink_stop(priv->phylink);
-
 	mutex_lock(&priv->lock);
 
+	rtnl_lock();
+	phylink_stop(priv->phylink);
+	rtnl_unlock();
+
 	netif_device_detach(ndev);
 	stmmac_stop_all_queues(priv);
 
@@ -4558,9 +4560,11 @@ int stmmac_resume(struct device *dev)
 
 	stmmac_start_all_queues(priv);
 
-	mutex_unlock(&priv->lock);
-
+	rtnl_lock();
 	phylink_start(priv->phylink);
+	rtnl_unlock();
+
+	mutex_unlock(&priv->lock);
 
 	return 0;
 }

From 655e023ed49d4f8a193945c3302784773b0ded95 Mon Sep 17 00:00:00 2001
From: Paul Durrant
Date: Fri, 13 Sep 2019 13:47:27 +0100
Subject: [PATCH 08/11] MAINTAINERS: xen-netback: update my email address

My Citrix email address will expire shortly.

Signed-off-by: Paul Durrant
Acked-by: Wei Liu
Acked-by: Wei Liu
Signed-off-by: David S. Miller
---
 MAINTAINERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index a50e97a63bc8..63ab65dbc966 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17646,7 +17646,7 @@ F: Documentation/ABI/testing/sysfs-hypervisor-xen
 XEN NETWORK BACKEND DRIVER
 M:	Wei Liu
-M:	Paul Durrant
+M:	Paul Durrant
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 S:	Supported

From 81e09359b4658fde625b59da554386179af31e33 Mon Sep 17 00:00:00 2001
From: Rain River
Date: Fri, 13 Sep 2019 21:43:45 +0800
Subject: [PATCH 09/11] MAINTAINERS: update FORCEDETH MAINTAINERS info

Many FORCEDETH NICs are used in our hosts. Several bugs have been
fixed and some features developed for FORCEDETH NICs, and I have been
reviewing patches for the FORCEDETH NIC for several months.

Mark me as the FORCEDETH NIC maintainer. I will send out the patches
and maintain the FORCEDETH NIC.

Signed-off-by: Rain River
Signed-off-by: David S. Miller
---
 MAINTAINERS | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 63ab65dbc966..ceeb7ceae041 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -649,6 +649,12 @@ M: Lino Sanfilippo
 S:	Maintained
 F:	drivers/net/ethernet/alacritech/*
 
+FORCEDETH GIGABIT ETHERNET DRIVER
+M:	Rain River
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/nvidia/*
+
 ALCATEL SPEEDTOUCH USB DRIVER
 M:	Duncan Sands
 L:	linux-usb@vger.kernel.org

From a53651ec93a8d7ab5b26c5390e0c389048b4b4b6 Mon Sep 17 00:00:00 2001
From: Sameeh Jubran
Date: Sun, 15 Sep 2019 17:29:44 +0300
Subject: [PATCH 10/11] net: ena: don't wake up tx queue when down

There is a race condition that can occur when calling ena_down(). The
ena_clean_tx_irq() function, which is part of the napi handler, might
wake up the tx queue when the queue is supposed to be down (during
recovery or while changing the size of the queues, for example). This
causes the ena_start_xmit() function to trigger and possibly try to
access the destroyed queues. The race is illustrated below:

Flow A:                             Flow B (napi handler):
ena_down()
  netif_carrier_off()
  netif_tx_disable()
                                    ena_clean_tx_irq()
                                      netif_tx_wake_queue()
  ena_napi_disable_all()
  ena_destroy_all_io_queues()

After these flows, the tx queue is active and ena_start_xmit() accesses
the destroyed queue, which leads to a kernel panic.

Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
Signed-off-by: Sameeh Jubran
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 664e3ed97ea9..d118ed4c57ce 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -823,7 +823,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 		above_thresh =
 			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
 						     ENA_TX_WAKEUP_THRESH);
-		if (netif_tx_queue_stopped(txq) && above_thresh) {
+		if (netif_tx_queue_stopped(txq) && above_thresh &&
+		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
 			netif_tx_wake_queue(txq);
 			u64_stats_update_begin(&tx_ring->syncp);
 			tx_ring->tx_stats.queue_wakeup++;

From 00b368502d18f790ab715e055869fd4bb7484a9b Mon Sep 17 00:00:00 2001
From: Dongli Zhang
Date: Mon, 16 Sep 2019 11:46:59 +0800
Subject: [PATCH 11/11] xen-netfront: do not assume sk_buff_head list is
 empty in error handling

When skb_shinfo(skb) is not able to cache an extra fragment (that is,
skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS), xennet_fill_frags()
assumes the sk_buff_head list is already empty. As a result, cons is
increased only by 1 before returning to the error handling path in
xennet_poll().

However, if the sk_buff_head list is not empty, queue->rx.rsp_cons may
be set incorrectly. That is, queue->rx.rsp_cons would point to the rx
ring buffer entries whose queue->rx_skbs[i] and queue->grant_rx_ref[i]
are already cleared to NULL. This leads to a NULL pointer access in
the next iteration when processing rx ring buffer entries.

Below is how xennet_poll() does error handling. All remaining entries
in tmpq are accounted to queue->rx.rsp_cons without assuming how many
outstanding skbs remain in the list.

 985 static int xennet_poll(struct napi_struct *napi, int budget)
 ...
 ...
1032		if (unlikely(xennet_set_skb_gso(skb, gso))) {
1033			__skb_queue_head(&tmpq, skb);
1034			queue->rx.rsp_cons += skb_queue_len(&tmpq);
1035			goto err;
1036		}

It is better to always handle the error in the same way.

Fixes: ad4f15dc2c70 ("xen/netfront: don't bug in case of too many frags")
Signed-off-by: Dongli Zhang
Signed-off-by: David S. Miller
---
 drivers/net/xen-netfront.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8d33970a2950..5f5722bf6762 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-			queue->rx.rsp_cons = ++cons;
+			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
 			kfree_skb(nskb);
 			return ~0U;
 		}
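A small worked example may make the consumer-index arithmetic in the
final patch concrete. Suppose the nr_frags limit is hit while cons
points at ring slot 10 and two more skbs, built from slots 11 and 12,
are still sitting on the list (illustrative numbers, not from the
commit):

    cons = 10                /* response being processed; its nskb was just freed */
    skb_queue_len(list) = 2  /* skbs for ring slots 11 and 12 still queued */

    old: rsp_cons = ++cons                      = 11
         -> slots 11 and 12 are re-read next time, but rx_skbs[11] and
            rx_skbs[12] were already cleared to NULL -> crash

    new: rsp_cons = ++cons + skb_queue_len(list) = 13
         -> every slot already consumed into a queued skb is skipped

This mirrors the error handling in xennet_poll() quoted in the commit
message, which likewise charges the whole remaining queue length to
rsp_cons rather than assuming the list is empty.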