qtnfmac: send EAPOL frames via control path

Send EAPOL frames via the control path so that they can be treated
differently from normal data frames: EAPOLs are sent with higher
priority and with aggregation and encryption disabled. Since all
devices benefit from sending EAPOL frames via the high-priority path,
move this functionality from chip-specific code into common code.

Signed-off-by: Igor Mitsyanko <igor.mitsyanko.os@quantenna.com>
Signed-off-by: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
commit 46d55fcec1
parent 97aef03cb7
Author: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
Date:   2019-11-13 11:06:55 +00:00
Committed by: Kalle Valo <kvalo@codeaurora.org>

5 files changed, 14 insertions(+), 20 deletions(-)
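
The diff below only shows the producer half of the high-priority path:
qtnf_packet_send_hi_pri() queues the EAPOL frame on
vif->high_pri_tx_queue and schedules vif->high_pri_tx_work on the bus
hprio_workqueue. For orientation, here is a minimal sketch of what the
consuming work item could look like; the function name and the
qtnf_control_tx_frame() helper are illustrative assumptions, not the
driver's actual control-path transmit routine.

/* Sketch only: a worker of the kind scheduled from
 * qtnf_packet_send_hi_pri(). qtnf_control_tx_frame() is a
 * hypothetical stand-in for the real control-path transmit helper
 * that pushes the frame out with elevated priority and with
 * aggregation and encryption disabled.
 */
static void qtnf_high_pri_tx_work_sketch(struct work_struct *work)
{
	struct qtnf_vif *vif = container_of(work, struct qtnf_vif,
					    high_pri_tx_work);
	struct sk_buff *skb;

	/* Drain every queued EAPOL frame into the control path. */
	while ((skb = skb_dequeue(&vif->high_pri_tx_queue)) != NULL) {
		qtnf_control_tx_frame(vif, skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}
}

Note that skb_queue_tail() and skb_dequeue() take the queue's internal
lock, so the xmit path and the worker need no extra serialization; that
is why the diff adds no locking of its own.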

--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c

@@ -67,6 +67,14 @@ static int qtnf_netdev_close(struct net_device *ndev)
 	return 0;
 }
 
+static void qtnf_packet_send_hi_pri(struct sk_buff *skb)
+{
+	struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
+
+	skb_queue_tail(&vif->high_pri_tx_queue, skb);
+	queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
+}
+
 /* Netdev handler for data transmission.
  */
 static netdev_tx_t
@@ -107,6 +115,12 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	/* tx path is enabled: reset vif timeout */
 	vif->cons_tx_timeout_cnt = 0;
 
+	if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
+		qtnf_packet_send_hi_pri(skb);
+		qtnf_update_tx_stats(ndev, skb);
+		return NETDEV_TX_OK;
+	}
+
 	return qtnf_bus_data_tx(mac->bus, skb);
 }
@@ -841,15 +855,6 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
 
-void qtnf_packet_send_hi_pri(struct sk_buff *skb)
-{
-	struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
-
-	skb_queue_tail(&vif->high_pri_tx_queue, skb);
-	queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
-}
-EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
-
 struct dentry *qtnf_get_debugfs_dir(void)
 {
 	return qtnf_debugfs_dir;

--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h

@@ -152,7 +152,6 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
 void qtnf_netdev_updown(struct net_device *ndev, bool up);
 void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb);
 struct dentry *qtnf_get_debugfs_dir(void);
 
 static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)

--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c

@@ -357,7 +357,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pcie_priv->pcie_irq_count = 0;
 	pcie_priv->tx_reclaim_done = 0;
 	pcie_priv->tx_reclaim_req = 0;
-	pcie_priv->tx_eapol = 0;
 
 	pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
 	if (!pcie_priv->workqueue) {

--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h

@@ -63,7 +63,6 @@ struct qtnf_pcie_bus_priv {
 	u32 tx_done_count;
 	u32 tx_reclaim_done;
 	u32 tx_reclaim_req;
-	u32 tx_eapol;
 
 	u8 msi_enabled;
 	u8 tx_stopped;

--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c

@@ -509,13 +509,6 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
 	int len;
 	int i;
 
-	if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
-		qtnf_packet_send_hi_pri(skb);
-		qtnf_update_tx_stats(skb->dev, skb);
-		priv->tx_eapol++;
-		return NETDEV_TX_OK;
-	}
-
 	spin_lock_irqsave(&priv->tx_lock, flags);
 	if (!qtnf_tx_queue_ready(ts)) {
@@ -779,7 +772,6 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
 	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
 	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
 	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
-	seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
 	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
 	seq_printf(s, "tx_done_index(%u)\n", tx_done_index);