Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Fixes 2019-02-21

This series contains fixes to ixgbe and i40e.

The majority of the fixes resolve XDP issues found in both drivers; only
one fix is not XDP related.  That fix resolves an issue seen on older
10GbE devices, where UDP traffic was either dropped or transmitted out of
order because the bit that enables L3/L4 filtering for transmit switched
packets was being set on devices that do not support the feature.
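
In code the guard is small (the full context is in the ixgbe_setup_mrqc()
hunk further down); roughly:

	/* Only X550 and newer MACs support L3/L4 filtering for Tx switched
	 * packets; setting the bit on older 10GbE parts is what caused the
	 * UDP drops and reordering described above.
	 */
	if (hw->mac.type >= ixgbe_mac_X550)
		mrqc |= IXGBE_MRQC_L3L4TXSWEN;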

Magnus fixes an XDP issue in both ixgbe and i40e where receive rings are
created but no buffers are allocated for AF_XDP in zero-copy mode.  As a
result no packets can be received and no interrupts are generated, so the
NAPI poll function that would allocate buffers to the rings never gets
executed.
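
The shared shape of the i40e and ixgbe changes is sketched below
(function names are the drivers' own, surrounding code elided; the full
hunks follow):

	/* After a UMEM is enabled on a queue, or an XDP program is
	 * (re)attached while an AF_XDP socket is already bound, kick the
	 * NAPI context once.  Without this, the empty Rx ring never raises
	 * an interrupt, so the poll routine that posts fill-queue buffers
	 * to hardware would never run and nothing could be received.
	 */
	err = i40e_queue_pair_enable(vsi, qid);
	if (err)
		return err;

	err = i40e_xsk_async_xmit(vsi->netdev, qid);	/* kick NAPI */
	if (err)
		return err;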

Björn fixes a race in the XDP xmit ring cleanup for i40e, where
in-progress ndo_xdp_xmit() calls must be taken into consideration.  A
synchronize_rcu() is added to wait for the NAPI context(s) to finish
before the queue is cleaned.
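
A condensed view of the pattern used in i40e_down() and
i40e_queue_pair_clean_rings() below (context elided):

	/* i40e_xdp_xmit() bails out while __I40E_VSI_DOWN or (now)
	 * __I40E_CONFIG_BUSY is set, so no new transmits enter the ring;
	 * synchronize_rcu() then waits for every ndo_xdp_xmit() call that
	 * is already running in softirq (RCU read-side) context to return
	 * before the XDP Tx ring is cleaned.
	 */
	if (i40e_enabled_xdp_vsi(vsi)) {
		synchronize_rcu();
		i40e_clean_tx_ring(vsi->xdp_rings[i]);
	}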

Jan fixes an ixgbe AF_XDP zero-copy transmit issue that could trigger a
device reset, by adding a check to ensure that the netif carrier is 'ok'
before trying to transmit packets.
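
Abbreviated from the ixgbe_xmit_zc() hunk at the end of the series:

	/* Stop posting zero-copy Tx descriptors while the link is down;
	 * otherwise the queued-but-untransmitted work can end up
	 * triggering a reset.
	 */
	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}
		/* ... build and post the next descriptor ... */
	}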
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit 033575ecfc by David S. Miller, 2019-02-21 12:21:37 -08:00
5 changed files with 60 additions and 10 deletions

--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c

@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	      i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
 	      !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
 	if (!ok) {
+		/* Log this in case the user has forgotten to give the kernel
+		 * any buffers, even later in the application.
+		 */
 		dev_info(&vsi->back->pdev->dev,
-			 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
 			 ring->xsk_umem ? "UMEM enabled " : "",
 			 ring->queue_index, pf_q);
 	}
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		i40e_clean_tx_ring(vsi->tx_rings[i]);
-		if (i40e_enabled_xdp_vsi(vsi))
+		if (i40e_enabled_xdp_vsi(vsi)) {
+			/* Make sure that in-progress ndo_xdp_xmit
+			 * calls are completed.
+			 */
+			synchronize_rcu();
 			i40e_clean_tx_ring(vsi->xdp_rings[i]);
+		}
 		i40e_clean_rx_ring(vsi->rx_rings[i]);
 	}
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
 	if (old_prog)
 		bpf_prog_put(old_prog);
 
+	/* Kick start the NAPI context if there is an AF_XDP socket open
+	 * on that queue id. This so that receiving will start.
+	 */
+	if (need_reset && prog)
+		for (i = 0; i < vsi->num_queue_pairs; i++)
+			if (vsi->xdp_rings[i]->xsk_umem)
+				(void)i40e_xsk_async_xmit(vsi->netdev, i);
+
 	return 0;
 }
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
 {
 	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
-	if (i40e_enabled_xdp_vsi(vsi))
+	if (i40e_enabled_xdp_vsi(vsi)) {
+		/* Make sure that in-progress ndo_xdp_xmit calls are
+		 * completed.
+		 */
+		synchronize_rcu();
 		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+	}
 	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
 }

--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	unsigned int queue_index = smp_processor_id();
 	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
 	struct i40e_ring *xdp_ring;
 	int drops = 0;
 	int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return -ENETDOWN;
 
-	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+	    test_bit(__I40E_CONFIG_BUSY, pf->state))
 		return -ENXIO;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))

--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c

@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
+
+		/* Kick start the NAPI context so that receiving will start */
+		err = i40e_xsk_async_xmit(vsi->netdev, qid);
+		if (err)
+			return err;
 	}
 
 	return 0;

--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 		else
 			mrqc = IXGBE_MRQC_VMDQRSS64EN;
 
-		/* Enable L3/L4 for Tx Switched packets */
-		mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+		/* Enable L3/L4 for Tx Switched packets only for X550,
+		 * older devices do not support this feature
+		 */
+		if (hw->mac.type >= ixgbe_mac_X550)
+			mrqc |= IXGBE_MRQC_L3L4TXSWEN;
 	} else {
 		if (tcs > 4)
 			mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct bpf_prog *old_prog;
+	bool need_reset;
 
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 			return -ENOMEM;
 
 	old_prog = xchg(&adapter->xdp_prog, prog);
+	need_reset = (!!prog != !!old_prog);
 
 	/* If transitioning XDP modes reconfigure rings */
-	if (!!prog != !!old_prog) {
+	if (need_reset) {
 		int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
 		if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	if (old_prog)
 		bpf_prog_put(old_prog);
 
+	/* Kick start the NAPI context if there is an AF_XDP socket open
+	 * on that queue id. This so that receiving will start.
+	 */
+	if (need_reset && prog)
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			if (adapter->xdp_ring[i]->xsk_umem)
+				(void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+
 	return 0;
 }

--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c

@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
 		ixgbe_txrx_ring_disable(adapter, qid);
 
 	err = ixgbe_add_xsk_umem(adapter, umem, qid);
+	if (err)
+		return err;
 
-	if (if_running)
+	if (if_running) {
 		ixgbe_txrx_ring_enable(adapter, qid);
 
-	return err;
+		/* Kick start the NAPI context so that receiving will start */
+		err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 	dma_addr_t dma;
 
 	while (budget-- > 0) {
-		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
+		    !netif_carrier_ok(xdp_ring->netdev)) {
 			work_done = false;
 			break;
 		}