net: add netif_tx_queue_frozen_or_stopped
When testing struct netdev_queue state against FROZEN bit, we also test XOFF bit. We can test both bits at once and save some cycles.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5a0d2268d2
parent d3c15cab21
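For reference, a minimal standalone C sketch of the optimization (userspace code, not from the commit; the QUEUE_STATE_* names are hypothetical stand-ins for the kernel's __QUEUE_STATE_* enum, with the bit layout assumed from the hunk below). Instead of two separate bit tests, a single AND against a combined mask answers "frozen or stopped?" in one operation; like the kernel helper, the fast variant returns a nonzero mask rather than a normalized 0/1.

/* Sketch only: userspace model of the bit-combining trick, assuming
 * bit 0 = XOFF and bit 1 = FROZEN as in enum netdev_queue_state_t. */
#include <stdio.h>

enum queue_state {
	QUEUE_STATE_XOFF,	/* stand-in for __QUEUE_STATE_XOFF */
	QUEUE_STATE_FROZEN,	/* stand-in for __QUEUE_STATE_FROZEN */
};

#define QUEUE_XOFF_OR_FROZEN	((1UL << QUEUE_STATE_XOFF) | \
				 (1UL << QUEUE_STATE_FROZEN))

/* Before: each bit tested separately. */
static int frozen_or_stopped_slow(unsigned long state)
{
	return (state & (1UL << QUEUE_STATE_XOFF)) ||
	       (state & (1UL << QUEUE_STATE_FROZEN));
}

/* After: both bits tested with one AND against the combined mask. */
static int frozen_or_stopped_fast(unsigned long state)
{
	return state & QUEUE_XOFF_OR_FROZEN;
}

int main(void)
{
	/* Exhaustively check the two variants agree on all four states. */
	for (unsigned long state = 0; state < 4; state++)
		printf("state=%lu slow=%d fast=%d\n", state,
		       frozen_or_stopped_slow(state),
		       !!frozen_or_stopped_fast(state));
	return 0;
}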
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
+#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)		| \
+				    (1 << __QUEUE_STATE_FROZEN))
 };
 
 struct netdev_queue {
@@ -1629,9 +1631,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 }
 
 /**
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,8 +76,7 @@ static void queue_process(struct work_struct *work)
 
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq) ||
+		if (netif_tx_queue_frozen_or_stopped(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3527,7 +3527,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	__netif_tx_lock_bh(txq);
 
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+	if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
 		ret = NETDEV_TX_BUSY;
 		pkt_dev->last_ok = 0;
 		goto unlock;
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) &&
-		    !netif_tx_queue_frozen(txq)) {
+		if (!netif_tx_queue_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (__netif_tx_trylock(slave_txq)) {
 			unsigned int length = qdisc_pkt_len(skb);
 
-			if (!netif_tx_queue_stopped(slave_txq) &&
-			    !netif_tx_queue_frozen(slave_txq) &&
+			if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
 			    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 				txq_trans_update(slave_txq);
 				__netif_tx_unlock(slave_txq);