mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
ath10k: dma unmap mgmt tx buffer if wmi cmd send fails
WCN3990 sends mgmt frames by reference via WMI. The host dma maps the mgmt frame and sends the physical address to the firmware in the wmi command. Since the dma mapping is done in the gen_mgmt_tx and if the wmi command send fails, the corresponding mgmt frame is not being dma unmapped. Fix the missing dma unmapping of mgmt tx frame when wmi command sending fails for mgmt tx by reference via WMI. The already existing mgmt tx using copy by value does not need such dma unmapping. Add a separate wmi-tlv op for mgmt tx via ref, which takes care of unmapping the dma address in case the wmi command send fails. Signed-off-by: Rakesh Pillai <pillair@codeaurora.org> Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
This commit is contained in:
parent
3a3b745f1e
commit
38a1390e02
@ -1,6 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2005-2011 Atheros Communications Inc.
|
* Copyright (c) 2005-2011 Atheros Communications Inc.
|
||||||
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
||||||
|
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission to use, copy, modify, and/or distribute this software for any
|
* Permission to use, copy, modify, and/or distribute this software for any
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
@ -3808,6 +3809,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
|
|||||||
{
|
{
|
||||||
struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
|
struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
dma_addr_t paddr;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
@ -3815,11 +3817,27 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
|
|||||||
if (!skb)
|
if (!skb)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
ret = ath10k_wmi_mgmt_tx(ar, skb);
|
if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
|
||||||
if (ret) {
|
ar->running_fw->fw_file.fw_features)) {
|
||||||
ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
|
paddr = dma_map_single(ar->dev, skb->data,
|
||||||
ret);
|
skb->len, DMA_TO_DEVICE);
|
||||||
ieee80211_free_txskb(ar->hw, skb);
|
if (!paddr)
|
||||||
|
continue;
|
||||||
|
ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
|
||||||
|
if (ret) {
|
||||||
|
ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
|
||||||
|
ret);
|
||||||
|
dma_unmap_single(ar->dev, paddr, skb->len,
|
||||||
|
DMA_FROM_DEVICE);
|
||||||
|
ieee80211_free_txskb(ar->hw, skb);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ret = ath10k_wmi_mgmt_tx(ar, skb);
|
||||||
|
if (ret) {
|
||||||
|
ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
|
||||||
|
ret);
|
||||||
|
ieee80211_free_txskb(ar->hw, skb);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2005-2011 Atheros Communications Inc.
|
* Copyright (c) 2005-2011 Atheros Communications Inc.
|
||||||
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
||||||
|
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission to use, copy, modify, and/or distribute this software for any
|
* Permission to use, copy, modify, and/or distribute this software for any
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
@ -125,6 +126,9 @@ struct wmi_ops {
|
|||||||
enum wmi_force_fw_hang_type type,
|
enum wmi_force_fw_hang_type type,
|
||||||
u32 delay_ms);
|
u32 delay_ms);
|
||||||
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
|
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
|
||||||
|
struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
|
||||||
|
struct sk_buff *skb,
|
||||||
|
dma_addr_t paddr);
|
||||||
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
|
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
|
||||||
u32 log_level);
|
u32 log_level);
|
||||||
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
|
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
|
||||||
@ -371,13 +375,34 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
|
|||||||
return ar->wmi.ops->get_txbf_conf_scheme(ar);
|
return ar->wmi.ops->get_txbf_conf_scheme(ar);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int
|
||||||
|
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
|
||||||
|
dma_addr_t paddr)
|
||||||
|
{
|
||||||
|
struct sk_buff *skb;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (!ar->wmi.ops->gen_mgmt_tx_send)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
|
||||||
|
if (IS_ERR(skb))
|
||||||
|
return PTR_ERR(skb);
|
||||||
|
|
||||||
|
ret = ath10k_wmi_cmd_send(ar, skb,
|
||||||
|
ar->wmi.cmd->mgmt_tx_send_cmdid);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
|
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
|
||||||
{
|
{
|
||||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
int ret;
|
int ret;
|
||||||
u32 mgmt_tx_cmdid;
|
|
||||||
|
|
||||||
if (!ar->wmi.ops->gen_mgmt_tx)
|
if (!ar->wmi.ops->gen_mgmt_tx)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
@ -386,13 +411,8 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
|
|||||||
if (IS_ERR(skb))
|
if (IS_ERR(skb))
|
||||||
return PTR_ERR(skb);
|
return PTR_ERR(skb);
|
||||||
|
|
||||||
if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
|
ret = ath10k_wmi_cmd_send(ar, skb,
|
||||||
ar->running_fw->fw_file.fw_features))
|
ar->wmi.cmd->mgmt_tx_cmdid);
|
||||||
mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
|
|
||||||
else
|
|
||||||
mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;
|
|
||||||
|
|
||||||
ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
|
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2005-2011 Atheros Communications Inc.
|
* Copyright (c) 2005-2011 Atheros Communications Inc.
|
||||||
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
||||||
|
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission to use, copy, modify, and/or distribute this software for any
|
* Permission to use, copy, modify, and/or distribute this software for any
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
@ -2484,19 +2485,19 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static struct sk_buff *
|
static struct sk_buff *
|
||||||
ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
|
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
|
||||||
|
dma_addr_t paddr)
|
||||||
{
|
{
|
||||||
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
|
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
|
||||||
struct wmi_tlv_mgmt_tx_cmd *cmd;
|
struct wmi_tlv_mgmt_tx_cmd *cmd;
|
||||||
struct wmi_tlv *tlv;
|
|
||||||
struct ieee80211_hdr *hdr;
|
struct ieee80211_hdr *hdr;
|
||||||
|
struct ath10k_vif *arvif;
|
||||||
|
u32 buf_len = msdu->len;
|
||||||
|
struct wmi_tlv *tlv;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
u32 vdev_id;
|
||||||
void *ptr;
|
void *ptr;
|
||||||
int len;
|
int len;
|
||||||
u32 buf_len = msdu->len;
|
|
||||||
struct ath10k_vif *arvif;
|
|
||||||
dma_addr_t mgmt_frame_dma;
|
|
||||||
u32 vdev_id;
|
|
||||||
|
|
||||||
if (!cb->vif)
|
if (!cb->vif)
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
@ -2537,12 +2538,7 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
|
|||||||
cmd->chanfreq = 0;
|
cmd->chanfreq = 0;
|
||||||
cmd->buf_len = __cpu_to_le32(buf_len);
|
cmd->buf_len = __cpu_to_le32(buf_len);
|
||||||
cmd->frame_len = __cpu_to_le32(msdu->len);
|
cmd->frame_len = __cpu_to_le32(msdu->len);
|
||||||
mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
|
cmd->paddr = __cpu_to_le64(paddr);
|
||||||
msdu->len, DMA_TO_DEVICE);
|
|
||||||
if (!mgmt_frame_dma)
|
|
||||||
return ERR_PTR(-ENOMEM);
|
|
||||||
|
|
||||||
cmd->paddr = __cpu_to_le64(mgmt_frame_dma);
|
|
||||||
|
|
||||||
ptr += sizeof(*tlv);
|
ptr += sizeof(*tlv);
|
||||||
ptr += sizeof(*cmd);
|
ptr += sizeof(*cmd);
|
||||||
@ -3701,7 +3697,7 @@ static const struct wmi_ops wmi_tlv_ops = {
|
|||||||
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
|
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
|
||||||
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
|
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
|
||||||
/* .gen_mgmt_tx = not implemented; HTT is used */
|
/* .gen_mgmt_tx = not implemented; HTT is used */
|
||||||
.gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx,
|
.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
|
||||||
.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
|
.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
|
||||||
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
|
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
|
||||||
.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
|
.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
|
||||||
|
Loading…
Reference in New Issue
Block a user