net: ethernet: sxgbe: remove private tx queue lock
The driver uses a private lock to synchronize the xmit function and the xmit completion handler, but since the NETIF_F_LLTX flag is not set, the xmit function is also called with the xmit_lock held. The completion handler, on the other hand, uses the reverse locking order: it first takes the private lock and then (in case the tx queue had been stopped) the xmit_lock.

Improve the locking by removing the private lock and using only the xmit_lock for synchronization instead.

Signed-off-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c280b48266
commit 980f140493
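The problem described above is the classic AB-BA deadlock pattern. Here is a minimal userspace sketch of it (an illustration only, not driver code: pthread mutexes stand in for the kernel's per-queue xmit_lock and the driver's private tx_lock, and the sleeps merely widen the race window):

/* ab_ba_demo.c - build with: cc -pthread ab_ba_demo.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;	/* core's per-queue lock */
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;	/* driver's private lock */

/* xmit path: the core takes xmit_lock first (NETIF_F_LLTX not set),
 * then the old sxgbe_xmit() took the private lock */
static void *xmit_path(void *arg)
{
	pthread_mutex_lock(&xmit_lock);
	usleep(1000);
	pthread_mutex_lock(&tx_lock);
	pthread_mutex_unlock(&tx_lock);
	pthread_mutex_unlock(&xmit_lock);
	return NULL;
}

/* completion path: the old sxgbe_tx_queue_clean() took the private
 * lock first, then netif_tx_lock() when waking a stopped queue */
static void *completion_path(void *arg)
{
	pthread_mutex_lock(&tx_lock);
	usleep(1000);
	pthread_mutex_lock(&xmit_lock);
	pthread_mutex_unlock(&xmit_lock);
	pthread_mutex_unlock(&tx_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, xmit_path, NULL);
	pthread_create(&b, NULL, completion_path, NULL);
	/* with the sleeps this reliably wedges: each thread holds one
	 * lock and waits forever for the other (AB-BA deadlock) */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock this run");
	return 0;
}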
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -384,7 +384,6 @@ struct sxgbe_tx_queue {
 	dma_addr_t *tx_skbuff_dma;
 	struct sk_buff **tx_skbuff;
 	struct timer_list txtimer;
-	spinlock_t tx_lock;	/* lock for tx queues */
 	unsigned int cur_tx;
 	unsigned int dirty_tx;
 	u32 tx_count_frames;
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -426,9 +426,6 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
 	tx_ring->dirty_tx = 0;
 	tx_ring->cur_tx = 0;
 
-	/* initialise TX queue lock */
-	spin_lock_init(&tx_ring->tx_lock);
-
 	return 0;
 
 dmamem_err:
@@ -743,7 +740,7 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
 
 	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
 
-	spin_lock(&tqueue->tx_lock);
+	__netif_tx_lock(dev_txq, smp_processor_id());
 
 	priv->xstats.tx_clean++;
 	while (tqueue->dirty_tx != tqueue->cur_tx) {
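For context on the replacement: __netif_tx_lock() takes the queue's _xmit_lock and records the owning CPU, which is why the call site passes smp_processor_id(). Roughly its shape in include/linux/netdevice.h of this vintage (paraphrased from memory for reference, not part of this patch):

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}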
@@ -781,18 +778,13 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
 
 	/* wake up queue */
 	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
-	    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
-		netif_tx_lock(priv->dev);
-		if (netif_tx_queue_stopped(dev_txq) &&
-		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
-			if (netif_msg_tx_done(priv))
-				pr_debug("%s: restart transmit\n", __func__);
-			netif_tx_wake_queue(dev_txq);
-		}
-		netif_tx_unlock(priv->dev);
+		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
+		if (netif_msg_tx_done(priv))
+			pr_debug("%s: restart transmit\n", __func__);
+		netif_tx_wake_queue(dev_txq);
 	}
 
-	spin_unlock(&tqueue->tx_lock);
+	__netif_tx_unlock(dev_txq);
 }
 
 /**
@@ -1304,9 +1296,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 		     tqueue->hwts_tx_en)))
 		ctxt_desc_req = 1;
 
-	/* get the spinlock */
-	spin_lock(&tqueue->tx_lock);
-
 	if (priv->tx_path_in_lpi_mode)
 		sxgbe_disable_eee_mode(priv);
 
@@ -1316,8 +1305,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
 				   __func__, txq_index);
 		}
-		/* release the spin lock in case of BUSY */
-		spin_unlock(&tqueue->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1436,8 +1423,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
 
-	spin_unlock(&tqueue->tx_lock);
-
 	return NETDEV_TX_OK;
 }
 
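After the patch both paths serialize on the one xmit_lock, so there is a single lock order by construction. That is also why the wake-up hunk above can drop the old re-check under netif_tx_lock(): with the xmit path excluded by the same lock, the stopped/available test cannot go stale between check and wake. The earlier sketch, updated to the new scheme (again an illustration under the same assumptions, not driver code):

/* fixed_demo.c - build with: cc -pthread fixed_demo.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;	/* the only lock left */

/* xmit path: the core still takes xmit_lock before calling the driver;
 * sxgbe_xmit() itself no longer locks anything */
static void *xmit_path(void *arg)
{
	pthread_mutex_lock(&xmit_lock);
	usleep(1000);		/* queue descriptors, kick DMA */
	pthread_mutex_unlock(&xmit_lock);
	return NULL;
}

/* completion path: __netif_tx_lock()/__netif_tx_unlock() stand in here */
static void *completion_path(void *arg)
{
	pthread_mutex_lock(&xmit_lock);
	usleep(1000);		/* reclaim descriptors, wake queue if stopped */
	pthread_mutex_unlock(&xmit_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, xmit_path, NULL);
	pthread_create(&b, NULL, completion_path, NULL);
	pthread_join(a, NULL);	/* always completes: one lock, one order */
	pthread_join(b, NULL);
	puts("done");
	return 0;
}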