mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-03-05 23:47:25 +07:00)
ixgbe: delay tail write to every 'n' packets
The current XDP implementation hits the tail on every XDP_TX return code. This patch changes driver behavior to only hit the tail after packet processing is complete. With this patch I can run XDP drop programs @ 14+Mpps and XDP_TX programs are at ~13.5Mpps.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
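The batching idea described above can be illustrated outside the driver. Below is a minimal, self-contained C sketch, not driver code: every name in it (fake_ring, mock_writel, queue_one_frame, poll_batched) is invented for illustration and is not an ixgbe symbol. Per-packet work only records that a transmit was queued; the tail (doorbell) register is written once per poll cycle rather than once per packet.

/* Sketch of "delay tail write to every 'n' packets": remember that TX work
 * was queued during the poll loop and ring the doorbell once at the end.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
        unsigned int next_to_use;       /* producer index */
        unsigned int tail_writes;       /* counts doorbell (MMIO) writes */
};

/* Stand-in for writel(ring->next_to_use, ring->tail). */
static void mock_writel(struct fake_ring *ring)
{
        ring->tail_writes++;
}

/* Per-packet work: just advance the producer index. */
static void queue_one_frame(struct fake_ring *ring)
{
        ring->next_to_use++;
}

/* One poll cycle with the batched scheme: the doorbell is rung at most
 * once, after the whole budget has been processed.
 */
static void poll_batched(struct fake_ring *ring, int budget)
{
        bool xdp_xmit = false;

        for (int i = 0; i < budget; i++) {
                queue_one_frame(ring);
                xdp_xmit = true;        /* remember there is pending TX work */
        }

        if (xdp_xmit)
                mock_writel(ring);      /* single tail write for the batch */
}

int main(void)
{
        struct fake_ring ring = { 0 };

        poll_batched(&ring, 64);
        printf("frames queued: %u, tail writes: %u\n",
               ring.next_to_use, ring.tail_writes);     /* 64 vs 1 */
        return 0;
}

Run as-is it reports 64 frames queued against a single tail write; in the driver the same ratio means one MMIO write per poll instead of one per XDP_TX frame, which is the saving the commit message is describing.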
parent 33fdc82f08
commit 7379f97a4f
@@ -2283,6 +2283,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
         unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
         u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+        bool xdp_xmit = false;
 
         while (likely(total_rx_packets < budget)) {
                 union ixgbe_adv_rx_desc *rx_desc;
@@ -2322,10 +2323,12 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 }
 
                 if (IS_ERR(skb)) {
-                        if (PTR_ERR(skb) == -IXGBE_XDP_TX)
+                        if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
+                                xdp_xmit = true;
                                 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
-                        else
+                        } else {
                                 rx_buffer->pagecnt_bias++;
+                        }
                         total_rx_packets++;
                         total_rx_bytes += size;
                 } else if (skb) {
@@ -2393,6 +2396,16 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 total_rx_packets++;
         }
 
+        if (xdp_xmit) {
+                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+                /* Force memory writes to complete before letting h/w
+                 * know there are new descriptors to fetch.
+                 */
+                wmb();
+                writel(ring->next_to_use, ring->tail);
+        }
+
         u64_stats_update_begin(&rx_ring->syncp);
         rx_ring->stats.packets += total_rx_packets;
         rx_ring->stats.bytes += total_rx_bytes;
@@ -8238,14 +8251,8 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
         tx_desc->read.olinfo_status =
                         cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-        /* Force memory writes to complete before letting h/w know there
-         * are new descriptors to fetch.  (Only applicable for weak-ordered
-         * memory model archs, such as IA-64).
-         *
-         * We also need this memory barrier to make certain all of the
-         * status bits have been updated before next_to_watch is written.
-         */
-        wmb();
+        /* Avoid any potential race with xdp_xmit and cleanup */
+        smp_wmb();
 
         /* set next_to_watch value indicating a packet is present */
         i++;
@@ -8255,7 +8262,6 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
         tx_buffer->next_to_watch = tx_desc;
         ring->next_to_use = i;
 
-        writel(i, ring->tail);
         return IXGBE_XDP_TX;
 }