mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-19 08:26:53 +07:00
mt76: add skb pointer to mt76_tx_info
Pass the skb pointer to tx_prepare_skb through the mt76_tx_info data structure. This is a preliminary patch to properly support the DMA error path for new chipsets (e.g. MT7615). Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org> Signed-off-by: Felix Fietkau <nbd@nbd.name>
This commit is contained in:
parent
f3950a4141
commit
cfaae9e67c
@ -290,7 +290,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt76_queue *q = dev->q_tx[qid].q;
|
||||
struct mt76_tx_info tx_info = {};
|
||||
struct mt76_tx_info tx_info = {
|
||||
.skb = skb,
|
||||
};
|
||||
int len, n = 0, ret = -ENOMEM;
|
||||
struct mt76_queue_entry e;
|
||||
struct mt76_txwi_cache *t;
|
||||
@ -335,8 +337,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
|
||||
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
ret = dev->drv->tx_prepare_skb(dev, txwi, skb, qid, wcid, sta,
|
||||
&tx_info);
|
||||
ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
|
||||
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
if (ret < 0)
|
||||
@ -348,7 +349,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
}
|
||||
|
||||
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
|
||||
tx_info.info, skb, t);
|
||||
tx_info.info, tx_info.skb, t);
|
||||
|
||||
unmap:
|
||||
for (n--; n > 0; n--)
|
||||
@ -356,7 +357,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
tx_info.buf[n].len, DMA_TO_DEVICE);
|
||||
|
||||
free:
|
||||
e.skb = skb;
|
||||
e.skb = tx_info.skb;
|
||||
e.txwi = t;
|
||||
dev->drv->tx_complete_skb(dev, qid, &e);
|
||||
mt76_put_txwi(dev, t);
|
||||
|
@ -85,6 +85,7 @@ struct mt76_queue_buf {
|
||||
|
||||
struct mt76_tx_info {
|
||||
struct mt76_queue_buf buf[32];
|
||||
struct sk_buff *skb;
|
||||
int nbuf;
|
||||
u32 info;
|
||||
};
|
||||
@ -291,8 +292,7 @@ struct mt76_driver_ops {
|
||||
void (*update_survey)(struct mt76_dev *dev);
|
||||
|
||||
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
|
@ -912,13 +912,13 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
}
|
||||
|
||||
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
|
||||
struct ieee80211_key_conf *key = info->control.hw_key;
|
||||
int pid;
|
||||
|
||||
@ -934,7 +934,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
mt7603_wtbl_set_ps(dev, msta, false);
|
||||
}
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
@ -944,7 +944,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
}
|
||||
|
||||
mt7603_mac_write_txwi(dev, txwi_ptr, skb, qid, wcid, sta, pid, key);
|
||||
mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
|
||||
sta, pid, key);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -221,8 +221,8 @@ void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
|
||||
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
|
||||
|
||||
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
|
@ -183,8 +183,8 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
|
||||
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
|
||||
struct sk_buff *skb);
|
||||
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
const u8 *mac);
|
||||
|
@ -147,12 +147,12 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
|
||||
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
|
||||
|
||||
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
|
||||
struct mt76x02_txwi *txwi = txwi_ptr;
|
||||
int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
|
||||
|
||||
@ -160,10 +160,10 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
|
||||
|
||||
hdrlen = ieee80211_hdrlen(hdr->frame_control);
|
||||
len = skb->len - (hdrlen & 2);
|
||||
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
|
||||
len = tx_info->skb->len - (hdrlen & 2);
|
||||
mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
txwi->pktid = pid;
|
||||
|
||||
if (pid >= MT_PACKET_ID_FIRST)
|
||||
|
@ -26,8 +26,8 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
|
||||
|
||||
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
|
||||
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
|
@ -72,23 +72,23 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
|
||||
}
|
||||
|
||||
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
|
||||
int pid, len = skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
|
||||
int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
|
||||
struct mt76x02_txwi *txwi;
|
||||
enum mt76_qsel qsel;
|
||||
u32 flags;
|
||||
|
||||
mt76_insert_hdr_pad(skb);
|
||||
mt76_insert_hdr_pad(tx_info->skb);
|
||||
|
||||
txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
|
||||
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
|
||||
skb_push(skb, sizeof(struct mt76x02_txwi));
|
||||
txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
|
||||
mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
|
||||
skb_push(tx_info->skb, sizeof(*txwi));
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
txwi->pktid = pid;
|
||||
|
||||
if (pid >= MT_PACKET_ID_FIRST || ep == MT_EP_OUT_HCCA)
|
||||
@ -101,7 +101,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
|
||||
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
|
||||
flags |= MT_TXD_INFO_WIV;
|
||||
|
||||
return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
|
||||
return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
|
||||
|
||||
|
@ -734,7 +734,9 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt76_queue *q = dev->q_tx[qid].q;
|
||||
struct urb *urb;
|
||||
struct mt76_tx_info tx_info = {
|
||||
.skb = skb,
|
||||
};
|
||||
u16 idx = q->tail;
|
||||
int err;
|
||||
|
||||
@ -742,20 +744,20 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
return -ENOSPC;
|
||||
|
||||
skb->prev = skb->next = NULL;
|
||||
err = dev->drv->tx_prepare_skb(dev, NULL, skb, qid, wcid, sta, NULL);
|
||||
err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
urb = q->entry[idx].urb;
|
||||
err = mt76u_tx_setup_buffers(dev, skb, urb);
|
||||
err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
|
||||
urb, mt76u_complete_tx, &q->entry[idx]);
|
||||
q->entry[idx].urb, mt76u_complete_tx,
|
||||
&q->entry[idx]);
|
||||
|
||||
q->tail = (q->tail + 1) % q->ndesc;
|
||||
q->entry[idx].skb = skb;
|
||||
q->entry[idx].skb = tx_info.skb;
|
||||
q->queued++;
|
||||
|
||||
return idx;
|
||||
|
Loading…
Reference in New Issue
Block a user