r8169: make use of xmit_more

There was a previous attempt to use xmit_more, but the change had to be
reverted because a transmit timeout sometimes occurred under load [0].
That failure may have been caused by a missing memory barrier; this new
attempt therefore keeps the memory barrier before the call to
netif_stop_queue, as the driver does today. The new attempt also changes
the order of some calls, as suggested by Eric.

[0] https://lkml.org/lkml/2019/2/10/39

Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Heiner Kallweit <hkallweit1@gmail.com>
Date:      2019-07-28 11:25:19 +02:00
Committer: David S. Miller <davem@davemloft.net>
Parent:    171a9bae68
Commit:    ef14358546

@@ -5641,6 +5641,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	struct device *d = tp_to_dev(tp);
 	dma_addr_t mapping;
 	u32 opts[2], len;
+	bool stop_queue;
+	bool door_bell;
 	int frags;
 
 	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -5684,13 +5686,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	txd->opts2 = cpu_to_le32(opts[1]);
 
-	netdev_sent_queue(dev, skb->len);
-
 	skb_tx_timestamp(skb);
 
 	/* Force memory writes to complete before releasing descriptor */
 	dma_wmb();
 
+	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
+
 	txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
 
 	/* Force all memory writes to complete before notifying device */
@@ -5698,14 +5700,19 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	tp->cur_tx += frags + 1;
 
-	RTL_W8(tp, TxPoll, NPQ);
-
-	if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+	if (unlikely(stop_queue)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
 		smp_wmb();
 		netif_stop_queue(dev);
+	}
+
+	if (door_bell)
+		RTL_W8(tp, TxPoll, NPQ);
+
+	if (unlikely(stop_queue)) {
 		/* Sync with rtl_tx:
 		 * - publish queue status and cur_tx ring index (write barrier)
 		 * - refresh dirty_tx ring index (read barrier).