Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for v5.8. Major changes:

ath11k

* add support for 802.11 encapsulation offload in hardware

* add htt_peer_stats_reset debugfs file

ath10k

* sdio: decrease power consumption

* sdio: add HTT TX bundle support to increase throughput

* sdio: add rx bitrate reporting

ath9k

* improvements to AR9002 calibration logic

carl9170

* remove buggy P2P_GO support
Author: Kalle Valo
Date:   2020-05-06 12:12:27 +03:00
Commit: 7f65f6118a

54 changed files with 1802 additions and 302 deletions

View File

@@ -96,6 +96,17 @@ Optional properties:
 - qcom,coexist-gpio-pin : gpio pin number information to support coex
 			which will be used by wifi firmware.
 
+* Subnodes
+The ath10k wifi node can contain one optional firmware subnode.
+Firmware subnode is needed when the platform does not have TrustZone.
+The firmware subnode must have:
+
+- iommus:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: A list of phandle and IOMMU specifier pairs.
+
 Example (to supply PCI based wifi block details):
 In this example, the node is defined as child node of the PCI controller.
 
@@ -196,4 +207,7 @@ wifi@18000000 {
 	memory-region = <&wifi_msa_mem>;
 	iommus = <&apps_smmu 0x0040 0x1>;
 	qcom,msa-fixed-perm;
+	wifi-firmware {
+		iommus = <&apps_iommu 0xc22 0x1>;
+	};
 };

View File

@@ -380,6 +380,7 @@ static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 l
 				  NULL, NULL);
 	if (ret) {
 		ath10k_warn(ar, "unable to write to the device\n");
+		kfree(cmd);
 		return ret;
 	}

View File

@@ -419,7 +419,7 @@ struct ce_pipe_config {
 #define PIPEDIR_INOUT 3 /* bidirectional */
 
 /* Establish a mapping between a service/direction and a pipe. */
-struct service_to_pipe {
+struct ce_service_to_pipe {
 	__le32 service_id;
 	__le32 pipedir;
 	__le32 pipenum;

View File

@@ -190,6 +190,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.uart_pin_workaround = true,
 		.tx_stats_over_pktlog = false,
 		.bmi_large_size_download = true,
+		.supports_peer_stats_info = true,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -725,10 +726,10 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
 	param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
 
-	/* Alternate credit size of 1544 as used by SDIO firmware is
-	 * not big enough for mac80211 / native wifi frames. disable it
-	 */
-	param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+	if (mode == ATH10K_FIRMWARE_MODE_NORMAL)
+		param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+	else
+		param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
 
 	if (mode == ATH10K_FIRMWARE_MODE_UTF)
 		param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
@@ -2714,7 +2715,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 		goto err_hif_stop;
 	}
 
-	status = ath10k_hif_swap_mailbox(ar);
+	status = ath10k_hif_start_post(ar);
 	if (status) {
 		ath10k_err(ar, "failed to swap mailbox: %d\n", status);
 		goto err_hif_stop;
@@ -3277,6 +3278,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 	init_completion(&ar->thermal.wmi_sync);
 	init_completion(&ar->bss_survey_done);
 	init_completion(&ar->peer_delete_done);
+	init_completion(&ar->peer_stats_info_complete);
 
 	INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
@@ -3288,6 +3290,11 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 	if (!ar->workqueue_aux)
 		goto err_free_wq;
 
+	ar->workqueue_tx_complete =
+		create_singlethread_workqueue("ath10k_tx_complete_wq");
+	if (!ar->workqueue_tx_complete)
+		goto err_free_aux_wq;
+
 	mutex_init(&ar->conf_mutex);
 	mutex_init(&ar->dump_mutex);
 	spin_lock_init(&ar->data_lock);
@@ -3315,7 +3322,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 	ret = ath10k_coredump_create(ar);
 	if (ret)
-		goto err_free_aux_wq;
+		goto err_free_tx_complete;
 
 	ret = ath10k_debug_create(ar);
 	if (ret)
@@ -3325,12 +3332,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 err_free_coredump:
 	ath10k_coredump_destroy(ar);
+err_free_tx_complete:
+	destroy_workqueue(ar->workqueue_tx_complete);
 err_free_aux_wq:
 	destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
 	destroy_workqueue(ar->workqueue);
 err_free_mac:
 	ath10k_mac_destroy(ar);
@@ -3346,6 +3353,9 @@ void ath10k_core_destroy(struct ath10k *ar)
 	flush_workqueue(ar->workqueue_aux);
 	destroy_workqueue(ar->workqueue_aux);
 
+	flush_workqueue(ar->workqueue_tx_complete);
+	destroy_workqueue(ar->workqueue_tx_complete);
+
 	ath10k_debug_destroy(ar);
 	ath10k_coredump_destroy(ar);
 	ath10k_htt_tx_destroy(&ar->htt);

View File

@@ -149,6 +149,26 @@ static inline u32 host_interest_item_address(u32 item_offset)
 	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
 }
 
+enum ath10k_phy_mode {
+	ATH10K_PHY_MODE_LEGACY = 0,
+	ATH10K_PHY_MODE_HT = 1,
+	ATH10K_PHY_MODE_VHT = 2,
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_ht_data_rate_type {
+	u8 beacon_rate_index;
+	u16 supported_rate[4];
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_vht_data_rate_type {
+	u8 beacon_rate_index;
+	u16 supported_VHT80_rate[2];
+	u16 supported_VHT40_rate[2];
+	u16 supported_VHT20_rate[2];
+};
+
 struct ath10k_bmi {
 	bool done_sent;
 };
@@ -500,8 +520,14 @@ struct ath10k_sta {
 	u16 peer_id;
 	struct rate_info txrate;
 	struct ieee80211_tx_info tx_info;
+	u32 tx_retries;
+	u32 tx_failed;
 	u32 last_tx_bitrate;
 
+	u32 rx_rate_code;
+	u32 rx_bitrate_kbps;
+	u32 tx_rate_code;
+	u32 tx_bitrate_kbps;
 	struct work_struct update_wk;
 	u64 rx_duration;
 	struct ath10k_htt_tx_stats *tx_stats;
@@ -949,6 +975,11 @@ struct ath10k {
 	struct ieee80211_hw *hw;
 	struct ieee80211_ops *ops;
 	struct device *dev;
+	struct msa_region {
+		dma_addr_t paddr;
+		u32 mem_size;
+		void *vaddr;
+	} msa;
 	u8 mac_addr[ETH_ALEN];
 
 	enum ath10k_hw_rev hw_rev;
@@ -1087,11 +1118,12 @@ struct ath10k {
 	int last_wmi_vdev_start_status;
 	struct completion vdev_setup_done;
 	struct completion vdev_delete_done;
+	struct completion peer_stats_info_complete;
 
 	struct workqueue_struct *workqueue;
 	/* Auxiliary workqueue */
 	struct workqueue_struct *workqueue_aux;
+	struct workqueue_struct *workqueue_tx_complete;
 	/* prevents concurrent FW reconfiguration */
 	struct mutex conf_mutex;
@@ -1132,6 +1164,8 @@ struct ath10k {
 	struct work_struct register_work;
 	struct work_struct restart_work;
+	struct work_struct bundle_tx_work;
+	struct work_struct tx_complete_work;
 
 	/* cycle count is reported twice for each visited channel during scan.
 	 * access protected by data_lock

View File

@@ -349,7 +349,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 	spin_unlock_bh(&ar->data_lock);
 }
 
-static int ath10k_debug_fw_stats_request(struct ath10k *ar)
+int ath10k_debug_fw_stats_request(struct ath10k *ar)
 {
 	unsigned long timeout, time_left;
 	int ret;
@@ -778,7 +778,7 @@ static ssize_t ath10k_mem_value_read(struct file *file,
 
 	ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
 	if (ret) {
-		ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
+		ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
 			    (u32)(*ppos), ret);
 		goto exit;
 	}

View File

@@ -125,6 +125,9 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
 {
 	return ar->debug.enable_extd_tx_stats;
 }
+
+int ath10k_debug_fw_stats_request(struct ath10k *ar);
+
 #else
 
 static inline int ath10k_debug_start(struct ath10k *ar)
@@ -192,6 +195,11 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
 	return 0;
 }
 
+static inline int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+	return 0;
+}
+
 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
 
 #define ath10k_debug_get_et_strings NULL

View File

@@ -54,7 +54,7 @@ struct ath10k_hif_ops {
 	 */
 	void (*stop)(struct ath10k *ar);
 
-	int (*swap_mailbox)(struct ath10k *ar);
+	int (*start_post)(struct ath10k *ar);
 
 	int (*get_htt_tx_complete)(struct ath10k *ar);
 
@@ -139,10 +139,10 @@ static inline void ath10k_hif_stop(struct ath10k *ar)
 	return ar->hif.ops->stop(ar);
 }
 
-static inline int ath10k_hif_swap_mailbox(struct ath10k *ar)
+static inline int ath10k_hif_start_post(struct ath10k *ar)
 {
-	if (ar->hif.ops->swap_mailbox)
-		return ar->hif.ops->swap_mailbox(ar);
+	if (ar->hif.ops->start_post)
+		return ar->hif.ops->start_post(ar);
 	return 0;
 }
 
@@ -170,7 +170,8 @@ static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
 static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
 						  u8 pipe_id, int force)
 {
-	ar->hif.ops->send_complete_check(ar, pipe_id, force);
+	if (ar->hif.ops->send_complete_check)
+		ar->hif.ops->send_complete_check(ar, pipe_id, force);
 }
 
 static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,

View File

@@ -51,10 +51,12 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
 				     struct sk_buff *skb)
 {
 	struct ath10k *ar = ep->htc->ar;
+	struct ath10k_htc_hdr *hdr;
 
 	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
 		   ep->eid, skb);
 
+	hdr = (struct ath10k_htc_hdr *)skb->data;
 	ath10k_htc_restore_tx_skb(ep->htc, skb);
 
 	if (!ep->ep_ops.ep_tx_complete) {
@@ -63,6 +65,11 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
 		return;
 	}
 
+	if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
 	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
 }
 EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
@@ -78,7 +85,7 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
 	hdr->eid = ep->eid;
 	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
 	hdr->flags = 0;
-	if (ep->tx_credit_flow_enabled)
+	if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
 		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
 
 	spin_lock_bh(&ep->htc->tx_lock);
@@ -86,6 +93,63 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
 	spin_unlock_bh(&ep->htc->tx_lock);
 }
 
+static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
+				     unsigned int len,
+				     bool consume)
+{
+	struct ath10k_htc *htc = ep->htc;
+	struct ath10k *ar = htc->ar;
+	enum ath10k_htc_ep_id eid = ep->eid;
+	int credits, ret = 0;
+
+	if (!ep->tx_credit_flow_enabled)
+		return 0;
+
+	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+	spin_lock_bh(&htc->tx_lock);
+
+	if (ep->tx_credits < credits) {
+		ath10k_dbg(ar, ATH10K_DBG_HTC,
+			   "htc insufficient credits ep %d required %d available %d consume %d\n",
+			   eid, credits, ep->tx_credits, consume);
+		ret = -EAGAIN;
+		goto unlock;
+	}
+
+	if (consume) {
+		ep->tx_credits -= credits;
+		ath10k_dbg(ar, ATH10K_DBG_HTC,
+			   "htc ep %d consumed %d credits total %d\n",
+			   eid, credits, ep->tx_credits);
+	}
+
+unlock:
+	spin_unlock_bh(&htc->tx_lock);
+	return ret;
+}
+
+static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
+{
+	struct ath10k_htc *htc = ep->htc;
+	struct ath10k *ar = htc->ar;
+	enum ath10k_htc_ep_id eid = ep->eid;
+	int credits;
+
+	if (!ep->tx_credit_flow_enabled)
+		return;
+
+	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+	spin_lock_bh(&htc->tx_lock);
+	ep->tx_credits += credits;
+	ath10k_dbg(ar, ATH10K_DBG_HTC,
+		   "htc ep %d reverted %d credits back total %d\n",
+		   eid, credits, ep->tx_credits);
+	spin_unlock_bh(&htc->tx_lock);
+
+	if (ep->ep_ops.ep_tx_credits)
+		ep->ep_ops.ep_tx_credits(htc->ar);
+}
+
 int ath10k_htc_send(struct ath10k_htc *htc,
 		    enum ath10k_htc_ep_id eid,
 		    struct sk_buff *skb)
@@ -95,8 +159,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
 	struct ath10k_hif_sg_item sg_item;
 	struct device *dev = htc->ar->dev;
-	int credits = 0;
 	int ret;
+	unsigned int skb_len;
 
 	if (htc->ar->state == ATH10K_STATE_WEDGED)
 		return -ECOMM;
@@ -108,23 +172,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 
 	skb_push(skb, sizeof(struct ath10k_htc_hdr));
 
-	if (ep->tx_credit_flow_enabled) {
-		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
-		spin_lock_bh(&htc->tx_lock);
-		if (ep->tx_credits < credits) {
-			ath10k_dbg(ar, ATH10K_DBG_HTC,
-				   "htc insufficient credits ep %d required %d available %d\n",
-				   eid, credits, ep->tx_credits);
-			spin_unlock_bh(&htc->tx_lock);
-			ret = -EAGAIN;
-			goto err_pull;
-		}
-		ep->tx_credits -= credits;
-		ath10k_dbg(ar, ATH10K_DBG_HTC,
-			   "htc ep %d consumed %d credits (total %d)\n",
-			   eid, credits, ep->tx_credits);
-		spin_unlock_bh(&htc->tx_lock);
-	}
+	skb_len = skb->len;
+	ret = ath10k_htc_consume_credit(ep, skb_len, true);
+	if (ret)
+		goto err_pull;
 
 	ath10k_htc_prepare_tx_skb(ep, skb);
 
@@ -155,17 +206,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
 		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 err_credits:
-	if (ep->tx_credit_flow_enabled) {
-		spin_lock_bh(&htc->tx_lock);
-		ep->tx_credits += credits;
-		ath10k_dbg(ar, ATH10K_DBG_HTC,
-			   "htc ep %d reverted %d credits back (total %d)\n",
-			   eid, credits, ep->tx_credits);
-		spin_unlock_bh(&htc->tx_lock);
-
-		if (ep->ep_ops.ep_tx_credits)
-			ep->ep_ops.ep_tx_credits(htc->ar);
-	}
+	ath10k_htc_release_credit(ep, skb_len);
err_pull:
 	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
 	return ret;
@@ -581,6 +622,278 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
 	return allocation;
 }
 
+static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
+				  struct sk_buff *bundle_skb,
+				  struct sk_buff_head *tx_save_head)
+{
+	struct ath10k_hif_sg_item sg_item;
+	struct ath10k_htc *htc = ep->htc;
+	struct ath10k *ar = htc->ar;
+	struct sk_buff *skb;
+	int ret, cn = 0;
+	unsigned int skb_len;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
+	skb_len = bundle_skb->len;
+	ret = ath10k_htc_consume_credit(ep, skb_len, true);
+
+	if (!ret) {
+		sg_item.transfer_id = ep->eid;
+		sg_item.transfer_context = bundle_skb;
+		sg_item.vaddr = bundle_skb->data;
+		sg_item.len = bundle_skb->len;
+
+		ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+		if (ret)
+			ath10k_htc_release_credit(ep, skb_len);
+	}
+
+	if (ret)
+		dev_kfree_skb_any(bundle_skb);
+
+	for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
+		if (ret) {
+			skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+			skb_queue_head(&ep->tx_req_head, skb);
+		} else {
+			skb_queue_tail(&ep->tx_complete_head, skb);
+		}
+	}
+
+	if (!ret)
+		queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC,
+		   "bundle tx status %d eid %d req count %d count %d len %d\n",
+		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
+	return ret;
+}
+
+static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
+{
+	struct ath10k_htc *htc = ep->htc;
+	struct ath10k *ar = htc->ar;
+	int ret;
+
+	ret = ath10k_htc_send(htc, ep->eid, skb);
+
+	if (ret)
+		skb_queue_head(&ep->tx_req_head, skb);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
+		   ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
+}
+
+static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
+{
+	struct ath10k_htc *htc = ep->htc;
+	struct sk_buff *bundle_skb, *skb;
+	struct sk_buff_head tx_save_head;
+	struct ath10k_htc_hdr *hdr;
+	u8 *bundle_buf;
+	int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
+
+	if (htc->ar->state == ATH10K_STATE_WEDGED)
+		return -ECOMM;
+
+	if (ep->tx_credit_flow_enabled &&
+	    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
+		return 0;
+
+	bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+	bundle_skb = dev_alloc_skb(bundles_left);
+
+	if (!bundle_skb)
+		return -ENOMEM;
+
+	bundle_buf = bundle_skb->data;
+	skb_queue_head_init(&tx_save_head);
+
+	while (true) {
+		skb = skb_dequeue(&ep->tx_req_head);
+		if (!skb)
+			break;
+
+		credit_pad = 0;
+		trans_len = skb->len + sizeof(*hdr);
+		credit_remainder = trans_len % ep->tx_credit_size;
+
+		if (credit_remainder != 0) {
+			credit_pad = ep->tx_credit_size - credit_remainder;
+			trans_len += credit_pad;
+		}
+
+		ret = ath10k_htc_consume_credit(ep,
+						bundle_buf + trans_len - bundle_skb->data,
+						false);
+		if (ret) {
+			skb_queue_head(&ep->tx_req_head, skb);
+			break;
+		}
+
+		if (bundles_left < trans_len) {
+			bundle_skb->len = bundle_buf - bundle_skb->data;
+			ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+
+			if (ret) {
+				skb_queue_head(&ep->tx_req_head, skb);
+				return ret;
+			}
+
+			if (skb_queue_len(&ep->tx_req_head) == 0) {
+				ath10k_htc_send_one_skb(ep, skb);
+				return ret;
+			}
+
+			if (ep->tx_credit_flow_enabled &&
+			    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
+				skb_queue_head(&ep->tx_req_head, skb);
+				return 0;
+			}
+
+			bundles_left =
+				ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+			bundle_skb = dev_alloc_skb(bundles_left);
+
+			if (!bundle_skb) {
+				skb_queue_head(&ep->tx_req_head, skb);
+				return -ENOMEM;
+			}
+			bundle_buf = bundle_skb->data;
+			skb_queue_head_init(&tx_save_head);
+		}
+
+		skb_push(skb, sizeof(struct ath10k_htc_hdr));
+		ath10k_htc_prepare_tx_skb(ep, skb);
+
+		memcpy(bundle_buf, skb->data, skb->len);
+		hdr = (struct ath10k_htc_hdr *)bundle_buf;
+		hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
+		hdr->pad_len = __cpu_to_le16(credit_pad);
+		bundle_buf += trans_len;
+		bundles_left -= trans_len;
+		skb_queue_tail(&tx_save_head, skb);
+	}
+
+	if (bundle_buf != bundle_skb->data) {
+		bundle_skb->len = bundle_buf - bundle_skb->data;
+		ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+	} else {
+		dev_kfree_skb_any(bundle_skb);
+	}
+
+	return ret;
+}
+
+static void ath10k_htc_bundle_tx_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
+	struct ath10k_htc_ep *ep;
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+		ep = &ar->htc.endpoint[i];
+
+		if (!ep->bundle_tx)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
+			   ep->eid, skb_queue_len(&ep->tx_req_head));
+
+		if (skb_queue_len(&ep->tx_req_head) >=
+		    ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
+			ath10k_htc_send_bundle_skbs(ep);
+		} else {
+			skb = skb_dequeue(&ep->tx_req_head);
+
+			if (!skb)
+				continue;
+			ath10k_htc_send_one_skb(ep, skb);
+		}
+	}
+}
+
+static void ath10k_htc_tx_complete_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
+	struct ath10k_htc_ep *ep;
+	enum ath10k_htc_ep_id eid;
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+		ep = &ar->htc.endpoint[i];
+		eid = ep->eid;
+		if (ep->bundle_tx && eid == ar->htt.eid) {
+			ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
+				   ep->eid, skb_queue_len(&ep->tx_complete_head));
+
+			while (true) {
+				skb = skb_dequeue(&ep->tx_complete_head);
+				if (!skb)
+					break;
+				ath10k_htc_notify_tx_completion(ep, skb);
+			}
+		}
+	}
+}
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc,
+		       enum ath10k_htc_ep_id eid,
+		       struct sk_buff *skb)
+{
+	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+	struct ath10k *ar = htc->ar;
+
+	if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
+		ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
+		return -ENOMEM;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
+		   eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
+
+	if (ep->bundle_tx) {
+		skb_queue_tail(&ep->tx_req_head, skb);
+		queue_work(ar->workqueue, &ar->bundle_tx_work);
+		return 0;
+	} else {
+		return ath10k_htc_send(htc, eid, skb);
+	}
+}
+
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
+{
+	if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
+	    !ep->bundle_tx) {
+		ep->bundle_tx = true;
+		skb_queue_head_init(&ep->tx_req_head);
+		skb_queue_head_init(&ep->tx_complete_head);
+	}
+}
+
+void ath10k_htc_stop_hl(struct ath10k *ar)
+{
+	struct ath10k_htc_ep *ep;
+	int i;
+
+	cancel_work_sync(&ar->bundle_tx_work);
+	cancel_work_sync(&ar->tx_complete_work);
+
+	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+		ep = &ar->htc.endpoint[i];
+
+		if (!ep->bundle_tx)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
+			   ep->eid, skb_queue_len(&ep->tx_req_head));
+		skb_queue_purge(&ep->tx_req_head);
+	}
+}
+
 int ath10k_htc_wait_target(struct ath10k_htc *htc)
 {
 	struct ath10k *ar = htc->ar;
@@ -649,14 +962,21 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 	 */
 	if (htc->control_resp_len >=
 	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
+		htc->alt_data_credit_size =
+			__le16_to_cpu(msg->ready_ext.reserved) &
+			ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
 		htc->max_msgs_per_htc_bundle =
 			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
 			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
 		ath10k_dbg(ar, ATH10K_DBG_HTC,
-			   "Extended ready message. RX bundle size: %d\n",
-			   htc->max_msgs_per_htc_bundle);
+			   "Extended ready message RX bundle size %d alt size %d\n",
+			   htc->max_msgs_per_htc_bundle,
+			   htc->alt_data_credit_size);
 	}
 
+	INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
+	INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
+
 	return 0;
 }
 
@@ -801,6 +1121,11 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
 	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
 	ep->tx_credits = tx_alloc;
+	ep->tx_credit_size = htc->target_credit_size;
+
+	if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
+	    htc->alt_data_credit_size != 0)
+		ep->tx_credit_size = htc->alt_data_credit_size;
 
 	/* copy all the callbacks */
 	ep->ep_ops = conn_req->ep_ops;
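
The bundling path above rounds every frame up to a whole number of credits before it is copied into the bundle buffer, and only whole credits are consumed from the endpoint. A minimal standalone sketch of that arithmetic (illustration only, not driver code; the 1544-byte alternate credit size is the value mentioned in the core.c comment earlier in this diff, and the 8-byte HTC header size is an assumption):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int credit_size = 1544; /* alt data credit size, see core.c above */
	unsigned int hdr_len = 8;        /* assumed sizeof(struct ath10k_htc_hdr) */
	unsigned int frame_len = 1600;   /* example payload length */

	unsigned int trans_len = frame_len + hdr_len;
	unsigned int rem = trans_len % credit_size;
	unsigned int pad = rem ? credit_size - rem : 0;

	trans_len += pad;
	printf("credits=%u pad=%u trans_len=%u\n",
	       DIV_ROUND_UP(trans_len, credit_size), pad, trans_len);
	return 0;
}

For a 1600-byte frame this prints credits=2 pad=1480 trans_len=3088; the pad value is what ath10k_htc_send_bundle_skbs() above stores in hdr->pad_len.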

View File

@@ -83,8 +83,14 @@ struct ath10k_htc_hdr {
 		u8 seq_no; /* for tx */
 		u8 control_byte1;
 	} __packed;
-	u8 pad0;
-	u8 pad1;
+	union {
+		__le16 pad_len;
+		struct {
+			u8 pad0;
+			u8 pad1;
+		} __packed;
+	} __packed;
 } __packed __aligned(4);
 
 enum ath10k_ath10k_htc_msg_id {
@@ -113,6 +119,8 @@ enum ath10k_htc_conn_flags {
 #define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
 };
 
+#define ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK 0xFFF
+
 enum ath10k_htc_conn_svc_status {
 	ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
 	ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
@@ -121,6 +129,10 @@ enum ath10k_htc_conn_svc_status {
 	ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
 };
 
+#define ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE 32
+#define ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE 2
+#define ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE 2
+
 enum ath10k_htc_setup_complete_flags {
 	ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1
 };
@@ -145,8 +157,14 @@ struct ath10k_htc_ready_extended {
 	struct ath10k_htc_ready base;
 	u8 htc_version; /* @enum ath10k_htc_version */
 	u8 max_msgs_per_htc_bundle;
-	u8 pad0;
-	u8 pad1;
+	union {
+		__le16 reserved;
+		struct {
+			u8 pad0;
+			u8 pad1;
+		} __packed;
+	} __packed;
 } __packed;
 
 struct ath10k_htc_conn_svc {
@@ -353,7 +371,12 @@ struct ath10k_htc_ep {
 	u8 seq_no; /* for debugging */
 	int tx_credits;
+	int tx_credit_size;
 	bool tx_credit_flow_enabled;
+	bool bundle_tx;
+	struct sk_buff_head tx_req_head;
+	struct sk_buff_head tx_complete_head;
+
 };
 
 struct ath10k_htc_svc_tx_credits {
@@ -378,10 +401,12 @@ struct ath10k_htc {
 	int total_transmit_credits;
 	int target_credit_size;
 	u8 max_msgs_per_htc_bundle;
+	int alt_data_credit_size;
 };
 
 int ath10k_htc_init(struct ath10k *ar);
 int ath10k_htc_wait_target(struct ath10k_htc *htc);
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep);
 int ath10k_htc_start(struct ath10k_htc *htc);
 int ath10k_htc_connect_service(struct ath10k_htc *htc,
 			       struct ath10k_htc_svc_conn_req *conn_req,
@@ -391,6 +416,10 @@ void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
 				      bool enable);
 int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
 		    struct sk_buff *packet);
+void ath10k_htc_stop_hl(struct ath10k *ar);
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+		       struct sk_buff *packet);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
 void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
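
The two header changes above replace fixed pad bytes with a union, so the same two bytes can also be read as a little-endian 16-bit field (pad_len in the HTC header, reserved in the extended ready message) without changing the wire format. A freestanding sketch of that aliasing (illustration only; GCC-style packed attributes stand in for the kernel's __packed, and the printed byte values assume a little-endian host):

#include <stdint.h>
#include <stdio.h>

struct htc_hdr_tail {
	union {
		uint16_t pad_len; /* __le16 in the driver */
		struct {
			uint8_t pad0;
			uint8_t pad1;
		} __attribute__((packed));
	} __attribute__((packed));
} __attribute__((packed));

int main(void)
{
	struct htc_hdr_tail t = { .pad_len = 0x0102 };

	/* still exactly two bytes; pad0/pad1 alias the halves of pad_len */
	printf("size=%zu pad0=%#x pad1=%#x\n", sizeof(t), t.pad0, t.pad1);
	return 0;
}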

View File

@@ -135,6 +135,8 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
 {
 	struct ath10k_htc_svc_conn_req conn_req;
 	struct ath10k_htc_svc_conn_resp conn_resp;
+	struct ath10k *ar = htt->ar;
+	struct ath10k_htc_ep *ep;
 	int status;
 
 	memset(&conn_req, 0, sizeof(conn_req));
@@ -142,6 +144,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
 
 	conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
 	conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
+	conn_req.ep_ops.ep_tx_credits = ath10k_htt_op_ep_tx_credits;
 
 	/* connect to control service */
 	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -154,6 +157,11 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
 
 	htt->eid = conn_resp.eid;
 
+	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+		ep = &ar->htc.endpoint[htt->eid];
+		ath10k_htc_setup_tx_req(ep);
+	}
+
 	htt->disable_tx_comp = ath10k_hif_get_htt_tx_complete(htt->ar);
 	if (htt->disable_tx_comp)
 		ath10k_htc_change_tx_credit_flow(&htt->ar->htc, htt->eid, true);

View File

@@ -2032,6 +2032,9 @@ struct ath10k_htt {
 	const struct ath10k_htt_tx_ops *tx_ops;
 	const struct ath10k_htt_rx_ops *rx_ops;
 	bool disable_tx_comp;
+	bool bundle_tx;
+	struct sk_buff_head tx_req_head;
+	struct sk_buff_head tx_complete_head;
 };
 
 struct ath10k_htt_tx_ops {
@@ -2046,6 +2049,7 @@ struct ath10k_htt_tx_ops {
 	int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
 				    u8 max_subfrms_ampdu,
 				    u8 max_subfrms_amsdu);
+	void (*htt_flush_tx)(struct ath10k_htt *htt);
 };
 
 static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
@@ -2085,6 +2089,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt,
 	return htt->tx_ops->htt_tx(htt, txmode, msdu);
 }
 
+static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
+{
+	if (htt->tx_ops->htt_flush_tx)
+		htt->tx_ops->htt_flush_tx(htt);
+}
+
 static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
 {
 	if (!htt->tx_ops->htt_alloc_txbuff)
@@ -2278,6 +2288,7 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
 			     __le16 fetch_seq_num,
 			     struct htt_tx_fetch_record *records,
 			     size_t num_records);
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);
 
 void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
 			      struct ieee80211_txq *txq);

View File

@@ -3574,6 +3574,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
 		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
 	}
 
+	if (ar->htt.disable_tx_comp) {
+		arsta->tx_retries += peer_stats->retry_pkts;
+		arsta->tx_failed += peer_stats->failed_pkts;
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
+			   arsta->tx_retries, arsta->tx_failed);
+	}
+
 	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
 		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
 						    rate_idx);
@@ -3919,6 +3926,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 			ath10k_dbg(ar, ATH10K_DBG_HTT,
 				   "htt credit total %d\n",
 				   ep->tx_credits);
+			ep->ep_ops.ep_tx_credits(htc->ar);
 		}
 		break;
 	}

View File

@@ -529,9 +529,15 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
 	htt->tx_mem_allocated = false;
 }
 
+static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
+{
+	ath10k_htc_stop_hl(htt->ar);
+	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+}
+
 void ath10k_htt_tx_stop(struct ath10k_htt *htt)
 {
-	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+	ath10k_htt_flush_tx_queue(htt);
 	idr_destroy(&htt->pending_tx);
 }
 
@@ -541,6 +547,11 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
 	ath10k_htt_tx_destroy(htt);
 }
 
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
+{
+	queue_work(ar->workqueue, &ar->bundle_tx_work);
+}
+
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -1379,7 +1390,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
 	 */
 	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
 
-	res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
+	res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);
 
 out:
 	return res;
@@ -1819,6 +1830,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
 	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
 	.htt_tx = ath10k_htt_tx_hl,
 	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
+	.htt_flush_tx = ath10k_htt_flush_tx_queue,
 };
 
 void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)

View File

@@ -623,6 +623,9 @@ struct ath10k_hw_params {
 	/* tx stats support over pktlog */
 	bool tx_stats_over_pktlog;
 
+	/* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
+	bool supports_peer_stats_info;
 };
 
 struct htt_rx_desc;

View File

@@ -2959,6 +2959,11 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 	arvif->aid = bss_conf->aid;
 	ether_addr_copy(arvif->bssid, bss_conf->bssid);
 
+	ret = ath10k_wmi_pdev_set_param(ar,
+					ar->wmi.pdev_param->peer_stats_info_enable, 1);
+	if (ret)
+		ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret);
+
 	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
 	if (ret) {
 		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
@@ -4529,17 +4534,18 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
 	return 0;
 }
 
-static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
 {
 	/* It is not clear that allowing gaps in chainmask
 	 * is helpful. Probably it will not do what user
 	 * is hoping for, so warn in that case.
 	 */
 	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
-		return;
+		return true;
 
-	ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+	ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
 		    dbg, cm);
+	return false;
 }
 
 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
@@ -4722,11 +4728,15 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
 {
 	int ret;
+	bool is_valid_tx_chain_mask, is_valid_rx_chain_mask;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	ath10k_check_chain_mask(ar, tx_ant, "tx");
-	ath10k_check_chain_mask(ar, rx_ant, "rx");
+	is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, tx_ant, "tx");
+	is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, rx_ant, "rx");
+
+	if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask)
+		return -EINVAL;
 
 	ar->cfg_tx_chainmask = tx_ant;
 	ar->cfg_rx_chainmask = rx_ant;
@@ -7224,6 +7234,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 				ath10k_wmi_peer_flush(ar, arvif->vdev_id,
 						      arvif->bssid, bitmap);
 		}
+		ath10k_htt_flush_tx(&ar->htt);
 	}
 
 	return;
 }
@@ -8294,6 +8305,215 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
 		peer->removed = true;
 }
 
+/* HT MCS parameters with Nss = 1 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = {
+	/* MCS  L20   L40   S20   S40 */
+	{0,  { 65,   135,  72,   150} },
+	{1,  { 130,  270,  144,  300} },
+	{2,  { 195,  405,  217,  450} },
+	{3,  { 260,  540,  289,  600} },
+	{4,  { 390,  810,  433,  900} },
+	{5,  { 520,  1080, 578,  1200} },
+	{6,  { 585,  1215, 650,  1350} },
+	{7,  { 650,  1350, 722,  1500} }
+};
+
+/* HT MCS parameters with Nss = 2 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = {
+	/* MCS  L20   L40   S20   S40 */
+	{0,  {130,  270,  144,  300} },
+	{1,  {260,  540,  289,  600} },
+	{2,  {390,  810,  433,  900} },
+	{3,  {520,  1080, 578,  1200} },
+	{4,  {780,  1620, 867,  1800} },
+	{5,  {1040, 2160, 1156, 2400} },
+	{6,  {1170, 2430, 1300, 2700} },
+	{7,  {1300, 2700, 1444, 3000} }
+};
+
+/* MCS parameters with Nss = 1 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
+	/* MCS  L80    S80     L40   S40    L20   S20 */
+	{0,  {293,   325},  {135,  150},  {65,   72} },
+	{1,  {585,   650},  {270,  300},  {130,  144} },
+	{2,  {878,   975},  {405,  450},  {195,  217} },
+	{3,  {1170,  1300}, {540,  600},  {260,  289} },
+	{4,  {1755,  1950}, {810,  900},  {390,  433} },
+	{5,  {2340,  2600}, {1080, 1200}, {520,  578} },
+	{6,  {2633,  2925}, {1215, 1350}, {585,  650} },
+	{7,  {2925,  3250}, {1350, 1500}, {650,  722} },
+	{8,  {3510,  3900}, {1620, 1800}, {780,  867} },
+	{9,  {3900,  4333}, {1800, 2000}, {780,  867} }
+};
+
+/*MCS parameters with Nss = 2 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
+	/* MCS  L80    S80     L40   S40    L20   S20 */
+	{0,  {585,   650},  {270,  300},  {130,  144} },
+	{1,  {1170,  1300}, {540,  600},  {260,  289} },
+	{2,  {1755,  1950}, {810,  900},  {390,  433} },
+	{3,  {2340,  2600}, {1080, 1200}, {520,  578} },
+	{4,  {3510,  3900}, {1620, 1800}, {780,  867} },
+	{5,  {4680,  5200}, {2160, 2400}, {1040, 1156} },
+	{6,  {5265,  5850}, {2430, 2700}, {1170, 1300} },
+	{7,  {5850,  6500}, {2700, 3000}, {1300, 1444} },
+	{8,  {7020,  7800}, {3240, 3600}, {1560, 1733} },
+	{9,  {7800,  8667}, {3600, 4000}, {1560, 1733} }
+};
+
+static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+					 u8 *flags, u8 *bw)
+{
+	struct ath10k_index_ht_data_rate_type *mcs_rate;
+
+	mcs_rate = (struct ath10k_index_ht_data_rate_type *)
+		   ((nss == 1) ? &supported_ht_mcs_rate_nss1 :
+				 &supported_ht_mcs_rate_nss2);
+
+	if (rate == mcs_rate[mcs].supported_rate[0]) {
+		*bw = RATE_INFO_BW_20;
+	} else if (rate == mcs_rate[mcs].supported_rate[1]) {
+		*bw |= RATE_INFO_BW_40;
+	} else if (rate == mcs_rate[mcs].supported_rate[2]) {
+		*bw |= RATE_INFO_BW_20;
+		*flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate == mcs_rate[mcs].supported_rate[3]) {
+		*bw |= RATE_INFO_BW_40;
+		*flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else {
+		ath10k_warn(ar, "invalid ht params rate %d 100kbps nss %d mcs %d",
+			    rate, nss, mcs);
+	}
+}
+
+static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+					  u8 *flags, u8 *bw)
+{
+	struct ath10k_index_vht_data_rate_type *mcs_rate;
+
+	mcs_rate = (struct ath10k_index_vht_data_rate_type *)
+		   ((nss == 1) ? &supported_vht_mcs_rate_nss1 :
+				 &supported_vht_mcs_rate_nss2);
+
+	if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) {
+		*bw = RATE_INFO_BW_80;
+	} else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) {
+		*bw = RATE_INFO_BW_80;
+		*flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) {
+		*bw = RATE_INFO_BW_40;
+	} else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) {
+		*bw = RATE_INFO_BW_40;
+		*flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) {
+		*bw = RATE_INFO_BW_20;
+	} else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) {
+		*bw = RATE_INFO_BW_20;
+		*flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else {
+		ath10k_warn(ar, "invalid vht params rate %d 100kbps nss %d mcs %d",
+			    rate, nss, mcs);
+	}
+}
+
+static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate,
+				      enum ath10k_phy_mode mode, u8 nss, u8 mcs,
+				      u8 *flags, u8 *bw)
+{
+	if (mode == ATH10K_PHY_MODE_HT) {
+		*flags = RATE_INFO_FLAGS_MCS;
+		ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw);
+	} else if (mode == ATH10K_PHY_MODE_VHT) {
+		*flags = RATE_INFO_FLAGS_VHT_MCS;
+		ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw);
+	}
+}
+
+static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
+				     u32 bitrate_kbps, struct rate_info *rate)
+{
+	enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY;
+	enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code);
+	u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1;
+	u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
+	u8 flags = 0, bw = 0;
+
+	if (preamble == WMI_RATE_PREAMBLE_HT)
+		mode = ATH10K_PHY_MODE_HT;
+	else if (preamble == WMI_RATE_PREAMBLE_VHT)
+		mode = ATH10K_PHY_MODE_VHT;
+
+	ath10k_mac_get_rate_flags(ar, bitrate_kbps / 100, mode, nss, mcs, &flags, &bw);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n",
+		   preamble, mode, nss, mcs, flags, bw);
+
+	rate->flags = flags;
+	rate->bw = bw;
+	rate->legacy = bitrate_kbps / 100;
+	rate->nss = nss;
+	rate->mcs = mcs;
+}
+
+static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar,
+					       struct ieee80211_sta *sta,
+					       struct station_info *sinfo)
+{
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k_peer *peer;
+	unsigned long time_left;
+	int ret;
+
+	if (!(ar->hw_params.supports_peer_stats_info &&
+	      arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA))
+		return;
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, arsta->arvif->vdev_id, sta->addr);
+	spin_unlock_bh(&ar->data_lock);
+	if (!peer)
+		return;
+
+	reinit_completion(&ar->peer_stats_info_complete);
+
+	ret = ath10k_wmi_request_peer_stats_info(ar,
+						 arsta->arvif->vdev_id,
+						 WMI_REQUEST_ONE_PEER_STATS_INFO,
+						 arsta->arvif->bssid,
+						 0);
+	if (ret && ret != -EOPNOTSUPP) {
+		ath10k_warn(ar, "could not request peer stats info: %d\n", ret);
+		return;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ);
+	if (time_left == 0) {
+		ath10k_warn(ar, "timed out waiting peer stats info\n");
+		return;
+	}
+
+	if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) {
+		ath10k_mac_parse_bitrate(ar, arsta->rx_rate_code,
+					 arsta->rx_bitrate_kbps,
+					 &sinfo->rxrate);
+
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+		arsta->rx_rate_code = 0;
+		arsta->rx_bitrate_kbps = 0;
+	}
+
+	if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) {
+		ath10k_mac_parse_bitrate(ar, arsta->tx_rate_code,
+					 arsta->tx_bitrate_kbps,
+					 &sinfo->txrate);
+
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+		arsta->tx_rate_code = 0;
+		arsta->tx_bitrate_kbps = 0;
+	}
+}
+
 static void ath10k_sta_statistics(struct ieee80211_hw *hw,
 				  struct ieee80211_vif *vif,
 				  struct ieee80211_sta *sta,
@@ -8305,6 +8525,8 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
 	if (!ath10k_peer_stats_enabled(ar))
 		return;
 
+	ath10k_debug_fw_stats_request(ar);
+
 	sinfo->rx_duration = arsta->rx_duration;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -8320,6 +8542,15 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
 	}
 	sinfo->txrate.flags = arsta->txrate.flags;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+	if (ar->htt.disable_tx_comp) {
+		sinfo->tx_retries = arsta->tx_retries;
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+		sinfo->tx_failed = arsta->tx_failed;
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+	}
+
+	ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
 }
 
 static const struct ieee80211_ops ath10k_ops = {
@@ -8957,7 +9188,6 @@ int ath10k_mac_register(struct ath10k *ar)
 	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
 
 	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
-		ar->hw->wiphy->max_sched_scan_reqs = 1;
 		ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
 		ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
 		ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
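
The tables above store bitrates in 100 kbps units, one row per MCS index and one column per bandwidth/guard-interval combination; ath10k_mac_get_rate_flags_ht() recovers bandwidth and GI flags by matching the reported bitrate against the row selected by the reported MCS. A trimmed standalone sketch of that matching (illustration only; values copied from supported_ht_mcs_rate_nss1 above):

#include <stdio.h>

/* HT nss=1 rows in 100 kbps units: { L20, L40, S20, S40 } (from the diff) */
static const unsigned short ht_nss1[8][4] = {
	{  65,  135,  72,  150 }, { 130,  270, 144,  300 },
	{ 195,  405, 217,  450 }, { 260,  540, 289,  600 },
	{ 390,  810, 433,  900 }, { 520, 1080, 578, 1200 },
	{ 585, 1215, 650, 1350 }, { 650, 1350, 722, 1500 },
};

int main(void)
{
	unsigned int rate = 1350, mcs = 7; /* 135.0 Mbit/s reported at HT MCS 7 */
	static const char *const names[4] = {
		"20MHz/LGI", "40MHz/LGI", "20MHz/SGI", "40MHz/SGI"
	};
	int i;

	for (i = 0; i < 4; i++)
		if (rate == ht_nss1[mcs][i])
			printf("matched column %d: %s\n", i, names[i]);
	return 0;
}

This prints "matched column 1: 40MHz/LGI". The same raw rate can appear in more than one row (1350 is also the MCS 6 short-GI 40 MHz entry), which is why the lookup is keyed by the MCS from the rate code rather than by bitrate alone.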

View File

@@ -116,7 +116,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
 
-static struct ce_attr host_ce_config_wlan[] = {
+static const struct ce_attr pci_host_ce_config_wlan[] = {
 	/* CE0: host->target HTC control and raw streams */
 	{
 		.flags = CE_ATTR_FLAGS,
@@ -222,7 +222,7 @@ static struct ce_attr host_ce_config_wlan[] = {
 };
 
 /* Target firmware's Copy Engine configuration. */
-static struct ce_pipe_config target_ce_config_wlan[] = {
+static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
 	/* CE0: host->target HTC control and raw streams */
 	{
 		.pipenum = __cpu_to_le32(0),
@@ -335,7 +335,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
-static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
 	{
 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -1787,6 +1787,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
 					int force)
 {
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
 
 	if (!force) {
@@ -1804,7 +1806,7 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
 		 * If at least 50% of the total resources are still available,
 		 * don't bother checking again yet.
 		 */
-		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+		if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
 			return;
 	}
 	ath10k_ce_per_engine_service(ar, pipe);
@@ -1820,14 +1822,15 @@ static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
 int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
 				       u8 *ul_pipe, u8 *dl_pipe)
 {
-	const struct service_to_pipe *entry;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	const struct ce_service_to_pipe *entry;
 	bool ul_set = false, dl_set = false;
 	int i;
 
 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
 
-	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
-		entry = &target_service_to_ce_map_wlan[i];
+	for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
+		entry = &ar_pci->serv_to_pipe[i];
 
 		if (__le32_to_cpu(entry->service_id) != service_id)
 			continue;
@@ -2316,6 +2319,7 @@ static int ath10k_bus_get_num_banks(struct ath10k *ar)
 int ath10k_pci_init_config(struct ath10k *ar)
 {
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	u32 interconnect_targ_addr;
 	u32 pcie_state_targ_addr = 0;
 	u32 pipe_cfg_targ_addr = 0;
@@ -2361,7 +2365,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
 	}
 
 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
-					target_ce_config_wlan,
+					ar_pci->pipe_config,
 					sizeof(struct ce_pipe_config) *
 					NUM_TARGET_CE_CONFIG_WLAN);
 
@@ -2386,8 +2390,8 @@ int ath10k_pci_init_config(struct ath10k *ar)
 	}
 
 	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
-					target_service_to_ce_map_wlan,
-					sizeof(target_service_to_ce_map_wlan));
+					ar_pci->serv_to_pipe,
+					sizeof(pci_target_service_to_ce_map_wlan));
 	if (ret != 0) {
 		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
 		return ret;
@@ -2459,23 +2463,24 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar)
 {
 	struct ce_attr *attr;
 	struct ce_pipe_config *config;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
 	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
 	 * since it is currently used for other feature.
 	 */
 
 	/* Override Host's Copy Engine 5 configuration */
-	attr = &host_ce_config_wlan[5];
+	attr = &ar_pci->attr[5];
 	attr->src_sz_max = 0;
 	attr->dest_nentries = 0;
 
 	/* Override Target firmware's Copy Engine configuration */
-	config = &target_ce_config_wlan[5];
+	config = &ar_pci->pipe_config[5];
 	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
 	config->nbytes_max = __cpu_to_le32(2048);
 
 	/* Map from service/endpoint to Copy Engine */
-	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
+	ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
 }
 
 int ath10k_pci_alloc_pipes(struct ath10k *ar)
@@ -2491,7 +2496,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
 		pipe->pipe_num = i;
 		pipe->hif_ce_state = ar;
 
-		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+		ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
 		if (ret) {
 			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
 				   i, ret);
@@ -2504,7 +2509,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
 			continue;
 		}
 
-		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+		pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
 	}
 
 	return 0;
@@ -2520,10 +2525,11 @@ void ath10k_pci_free_pipes(struct ath10k *ar)
 int ath10k_pci_init_pipes(struct ath10k *ar)
 {
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int i, ret;
 
 	for (i = 0; i < CE_COUNT; i++) {
-		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+		ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
 		if (ret) {
 			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
 				   i, ret);
@@ -3595,6 +3601,30 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
 
+	ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
+			       sizeof(pci_host_ce_config_wlan),
+			       GFP_KERNEL);
+	if (!ar_pci->attr) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
+	ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
+				      sizeof(pci_target_ce_config_wlan),
+				      GFP_KERNEL);
+	if (!ar_pci->pipe_config) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
+	ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
+				       sizeof(pci_target_service_to_ce_map_wlan),
+				       GFP_KERNEL);
+	if (!ar_pci->serv_to_pipe) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
 	ret = ath10k_pci_setup_resource(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to setup resource: %d\n", ret);
@@ -3690,6 +3720,11 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 err_core_destroy:
 	ath10k_core_destroy(ar);
 
+err_free:
+	kfree(ar_pci->attr);
+	kfree(ar_pci->pipe_config);
+	kfree(ar_pci->serv_to_pipe);
+
 	return ret;
 }
 
@@ -3715,6 +3750,9 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 	ath10k_pci_sleep_sync(ar);
 	ath10k_pci_release(ar);
 	ath10k_core_destroy(ar);
+	kfree(ar_pci->attr);
+	kfree(ar_pci->pipe_config);
+	kfree(ar_pci->serv_to_pipe);
 }
 
 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

View File

@@ -183,6 +183,10 @@ struct ath10k_pci {
 	 * this struct.
 	 */
 	struct ath10k_ahb ahb[0];
+
+	struct ce_attr *attr;
+	struct ce_pipe_config *pipe_config;
+	struct ce_service_to_pipe *serv_to_pipe;
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)

View File

@@ -122,8 +122,8 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
 	int ret;
 	int i;
 
-	req.msa_addr = qmi->msa_pa;
-	req.size = qmi->msa_mem_size;
+	req.msa_addr = ar->msa.paddr;
+	req.size = ar->msa.mem_size;
 
 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
 			   wlfw_msa_info_resp_msg_v01_ei, &resp);
@@ -157,12 +157,12 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
 		goto out;
 	}
 
-	max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
+	max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
 	qmi->nr_mem_region = resp.mem_region_info_len;
 	for (i = 0; i < resp.mem_region_info_len; i++) {
-		if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
+		if (resp.mem_region_info[i].size > ar->msa.mem_size ||
 		    resp.mem_region_info[i].region_addr > max_mapped_addr ||
-		    resp.mem_region_info[i].region_addr < qmi->msa_pa ||
+		    resp.mem_region_info[i].region_addr < ar->msa.paddr ||
 		    resp.mem_region_info[i].size +
 		    resp.mem_region_info[i].region_addr > max_mapped_addr) {
 			ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
@@ -1006,54 +1006,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
 	spin_unlock(&qmi->event_lock);
 }
 
-static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
-{
-	struct ath10k *ar = qmi->ar;
-	struct device *dev = ar->dev;
-	struct device_node *node;
-	struct resource r;
-	int ret;
-
-	node = of_parse_phandle(dev->of_node, "memory-region", 0);
-	if (node) {
-		ret = of_address_to_resource(node, 0, &r);
-		if (ret) {
-			dev_err(dev, "failed to resolve msa fixed region\n");
-			return ret;
-		}
-		of_node_put(node);
-
-		qmi->msa_pa = r.start;
-		qmi->msa_mem_size = resource_size(&r);
-		qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
-					    MEMREMAP_WT);
-		if (IS_ERR(qmi->msa_va)) {
-			dev_err(dev, "failed to map memory region: %pa\n", &r.start);
-			return PTR_ERR(qmi->msa_va);
-		}
-	} else {
-		qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
-						  &qmi->msa_pa, GFP_KERNEL);
-		if (!qmi->msa_va) {
-			ath10k_err(ar, "failed to allocate dma memory for msa region\n");
-			return -ENOMEM;
-		}
-		qmi->msa_mem_size = msa_size;
-	}
-
-	if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
-		qmi->msa_fixed_perm = true;
-
-	ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
-		   &qmi->msa_pa,
-		   qmi->msa_va);
-
-	return 0;
-}
-
 int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
 {
 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct device *dev = ar->dev;
 	struct ath10k_qmi *qmi;
 	int ret;
 
@@ -1064,9 +1020,8 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
 	qmi->ar = ar;
 	ar_snoc->qmi = qmi;
 
-	ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
-	if (ret)
-		goto err;
+	if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
+		qmi->msa_fixed_perm = true;
 
 	ret = qmi_handle_init(&qmi->qmi_hdl,
 			      WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,

View File

@ -93,9 +93,6 @@ struct ath10k_qmi {
spinlock_t event_lock; /* spinlock for qmi event list */ spinlock_t event_lock; /* spinlock for qmi event list */
u32 nr_mem_region; u32 nr_mem_region;
struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS]; struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
dma_addr_t msa_pa;
u32 msa_mem_size;
void *msa_va;
struct ath10k_qmi_chip_info chip_info; struct ath10k_qmi_chip_info chip_info;
struct ath10k_qmi_board_info board_info; struct ath10k_qmi_board_info board_info;
struct ath10k_qmi_soc_info soc_info; struct ath10k_qmi_soc_info soc_info;

View File

@ -542,7 +542,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
int pkt_cnt = 0; int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) { if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n", ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS); n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM; ret = -ENOMEM;
goto err; goto err;
@ -1361,23 +1361,117 @@ static void ath10k_rx_indication_async_work(struct work_struct *work)
napi_schedule(&ar->napi); napi_schedule(&ar->napi);
} }
static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
{
struct ath10k *ar = ar_sdio->ar;
unsigned char rtc_state = 0;
int ret = 0;
rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
if (ret) {
ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
return ret;
}
*state = rtc_state & 0x3;
return ret;
}
static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
u32 val;
int retry = ATH10K_CIS_READ_RETRY, ret = 0;
unsigned char rtc_state = 0;
sdio_claim_host(ar_sdio->func);
ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
if (ret) {
ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
ret);
goto release;
}
if (enable_sleep) {
val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
} else {
val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
}
ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
if (ret) {
ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
ret);
}
if (!enable_sleep) {
do {
udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
if (ret) {
ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
break;
}
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
rtc_state);
if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
break;
udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
retry--;
} while (retry > 0);
}
release:
sdio_release_host(ar_sdio->func);
return ret;
}
static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
{
struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
}
static void ath10k_sdio_write_async_work(struct work_struct *work) static void ath10k_sdio_write_async_work(struct work_struct *work)
{ {
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio, struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
wr_async_work); wr_async_work);
struct ath10k *ar = ar_sdio->ar; struct ath10k *ar = ar_sdio->ar;
struct ath10k_sdio_bus_request *req, *tmp_req; struct ath10k_sdio_bus_request *req, *tmp_req;
struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
spin_lock_bh(&ar_sdio->wr_async_lock); spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list); list_del(&req->list);
spin_unlock_bh(&ar_sdio->wr_async_lock); spin_unlock_bh(&ar_sdio->wr_async_lock);
if (req->address >= mbox_info->htc_addr &&
ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
ath10k_sdio_set_mbox_sleep(ar, false);
mod_timer(&ar_sdio->sleep_timer, jiffies +
msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
}
__ath10k_sdio_write_async(ar, req); __ath10k_sdio_write_async(ar, req);
spin_lock_bh(&ar_sdio->wr_async_lock); spin_lock_bh(&ar_sdio->wr_async_lock);
} }
spin_unlock_bh(&ar_sdio->wr_async_lock); spin_unlock_bh(&ar_sdio->wr_async_lock);
if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
ath10k_sdio_set_mbox_sleep(ar, true);
} }
static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr, static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
@ -1444,7 +1538,7 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
/* sdio HIF functions */ /* sdio HIF functions */
static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar) static int ath10k_sdio_disable_intrs(struct ath10k *ar)
{ {
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@ -1500,7 +1594,7 @@ static int ath10k_sdio_hif_power_up(struct ath10k *ar,
ar_sdio->is_disabled = false; ar_sdio->is_disabled = false;
ret = ath10k_sdio_hif_disable_intrs(ar); ret = ath10k_sdio_disable_intrs(ar);
if (ret) if (ret)
return ret; return ret;
@ -1517,6 +1611,9 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n"); ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
del_timer_sync(&ar_sdio->sleep_timer);
ath10k_sdio_set_mbox_sleep(ar, true);
/* Disable the card */ /* Disable the card */
sdio_claim_host(ar_sdio->func); sdio_claim_host(ar_sdio->func);
@ -1569,7 +1666,7 @@ static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return 0; return 0;
} }
static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar) static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{ {
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@ -1617,33 +1714,6 @@ static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
return ret; return ret;
} }
static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
u32 val;
int ret;
ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
if (ret) {
ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
ret);
return ret;
}
if (enable_sleep)
val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
else
val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
if (ret) {
ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
ret);
return ret;
}
return 0;
}
/* HIF diagnostics */ /* HIF diagnostics */
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf, static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
@ -1679,8 +1749,8 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
return ret; return ret;
} }
static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address, static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
u32 *value) u32 *value)
{ {
__le32 *val; __le32 *val;
int ret; int ret;
@ -1725,7 +1795,7 @@ static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
return 0; return 0;
} }
static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar) static int ath10k_sdio_hif_start_post(struct ath10k *ar)
{ {
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
u32 addr, val; u32 addr, val;
@ -1733,7 +1803,7 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
addr = host_interest_item_address(HI_ITEM(hi_acs_flags)); addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
ret = ath10k_sdio_hif_diag_read32(ar, addr, &val); ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) { if (ret) {
ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret); ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
return ret; return ret;
@ -1749,6 +1819,8 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
ar_sdio->swap_mbox = false; ar_sdio->swap_mbox = false;
} }
ath10k_sdio_set_mbox_sleep(ar, true);
return 0; return 0;
} }
@ -1759,7 +1831,7 @@ static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
addr = host_interest_item_address(HI_ITEM(hi_acs_flags)); addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
ret = ath10k_sdio_hif_diag_read32(ar, addr, &val); ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) { if (ret) {
ath10k_warn(ar, ath10k_warn(ar,
"unable to read hi_acs_flags for htt tx comple : %d\n", ret); "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
@ -1788,7 +1860,7 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
* request before interrupts are disabled. * request before interrupts are disabled.
*/ */
msleep(20); msleep(20);
ret = ath10k_sdio_hif_disable_intrs(ar); ret = ath10k_sdio_disable_intrs(ar);
if (ret) if (ret)
return ret; return ret;
@ -1810,19 +1882,19 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
sdio_release_host(ar_sdio->func); sdio_release_host(ar_sdio->func);
ret = ath10k_sdio_hif_enable_intrs(ar); ret = ath10k_sdio_enable_intrs(ar);
if (ret) if (ret)
ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret); ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
/* Enable sleep and then disable it again */ /* Enable sleep and then disable it again */
ret = ath10k_sdio_hif_set_mbox_sleep(ar, true); ret = ath10k_sdio_set_mbox_sleep(ar, true);
if (ret) if (ret)
return ret; return ret;
/* Wait for 20ms for the written value to take effect */ /* Wait for 20ms for the written value to take effect */
msleep(20); msleep(20);
ret = ath10k_sdio_hif_set_mbox_sleep(ar, false); ret = ath10k_sdio_set_mbox_sleep(ar, false);
if (ret) if (ret)
return ret; return ret;
@ -2029,17 +2101,6 @@ static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
*dl_pipe = 0; *dl_pipe = 0;
} }
/* This op is currently only used by htc_wait_target if the HTC ready
* message times out. It is not applicable for SDIO since there is nothing
* we can do if the HTC ready message does not arrive in time.
* TODO: Make this op non mandatory by introducing a NULL check in the
* hif op wrapper.
*/
static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
u8 pipe, int force)
{
}
static const struct ath10k_hif_ops ath10k_sdio_hif_ops = { static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.tx_sg = ath10k_sdio_hif_tx_sg, .tx_sg = ath10k_sdio_hif_tx_sg,
.diag_read = ath10k_sdio_hif_diag_read, .diag_read = ath10k_sdio_hif_diag_read,
@ -2047,11 +2108,10 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg, .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
.start = ath10k_sdio_hif_start, .start = ath10k_sdio_hif_start,
.stop = ath10k_sdio_hif_stop, .stop = ath10k_sdio_hif_stop,
.swap_mailbox = ath10k_sdio_hif_swap_mailbox, .start_post = ath10k_sdio_hif_start_post,
.get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete, .get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete,
.map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe, .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
.get_default_pipe = ath10k_sdio_hif_get_default_pipe, .get_default_pipe = ath10k_sdio_hif_get_default_pipe,
.send_complete_check = ath10k_sdio_hif_send_complete_check,
.power_up = ath10k_sdio_hif_power_up, .power_up = ath10k_sdio_hif_power_up,
.power_down = ath10k_sdio_hif_power_down, .power_down = ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM #ifdef CONFIG_PM
@ -2076,6 +2136,8 @@ static int ath10k_sdio_pm_suspend(struct device *device)
if (!device_may_wakeup(ar->dev)) if (!device_may_wakeup(ar->dev))
return 0; return 0;
ath10k_sdio_set_mbox_sleep(ar, true);
pm_flag = MMC_PM_KEEP_POWER; pm_flag = MMC_PM_KEEP_POWER;
ret = sdio_set_host_pm_flags(func, pm_flag); ret = sdio_set_host_pm_flags(func, pm_flag);
@ -2239,6 +2301,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq; goto err_free_wq;
} }
timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
return 0; return 0;
err_free_wq: err_free_wq:

View File

@ -98,6 +98,20 @@
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF #define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000 #define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
enum sdio_mbox_state {
SDIO_MBOX_UNKNOWN_STATE = 0,
SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
SDIO_MBOX_SLEEP_STATE = 2,
SDIO_MBOX_AWAKE_STATE = 3,
};
#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US 125
#define ATH10K_CIS_RTC_STATE_ADDR 0x1138
#define ATH10K_CIS_RTC_STATE_ON 0x01
#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US 1500
#define ATH10K_CIS_READ_RETRY 10
#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS 50
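Taken together, these defines drive a small wake/sleep protocol. A rough sketch of the intended flow, inferred from the sdio.c changes above (ours, not part of the patch):

	/* Illustrative state flow, assuming the sdio.c handlers in this patch:
	 *
	 *   HTC write while mbox_state == SDIO_MBOX_SLEEP_STATE
	 *     -> ath10k_sdio_set_mbox_sleep(ar, false); poll RTC_STATE every
	 *        125 us (up to ATH10K_CIS_READ_RETRY tries, with a 1500 us
	 *        xtal-settle wait between tries) until it reads ON
	 *     -> SDIO_MBOX_AWAKE_STATE; re-arm sleep_timer for 50 ms
	 *   sleep_timer expires after 50 ms of mbox inactivity
	 *     -> SDIO_MBOX_REQUEST_TO_SLEEP_STATE; queue wr_async_work
	 *   wr_async_work drains the queue and sees the sleep request
	 *     -> ath10k_sdio_set_mbox_sleep(ar, true) -> SDIO_MBOX_SLEEP_STATE
	 */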
/* TODO: remove this and use skb->cb instead, much cleaner approach */ /* TODO: remove this and use skb->cb instead, much cleaner approach */
struct ath10k_sdio_bus_request { struct ath10k_sdio_bus_request {
struct list_head list; struct list_head list;
@ -218,6 +232,8 @@ struct ath10k_sdio {
spinlock_t wr_async_lock; spinlock_t wr_async_lock;
struct work_struct async_work_rx; struct work_struct async_work_rx;
struct timer_list sleep_timer;
enum sdio_mbox_state mbox_state;
}; };
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar) static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)

View File

@ -11,6 +11,8 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/property.h> #include <linux/property.h>
#include <linux/regulator/consumer.h> #include <linux/regulator/consumer.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ce.h" #include "ce.h"
#include "coredump.h" #include "coredump.h"
@ -356,7 +358,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
}, },
}; };
static struct service_to_pipe target_service_to_ce_map_wlan[] = { static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
{ {
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@ -769,7 +771,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe) u8 *ul_pipe, u8 *dl_pipe)
{ {
const struct service_to_pipe *entry; const struct ce_service_to_pipe *entry;
bool ul_set = false, dl_set = false; bool ul_set = false, dl_set = false;
int i; int i;
@ -1393,7 +1395,6 @@ static int ath10k_hw_power_off(struct ath10k *ar)
static void ath10k_msa_dump_memory(struct ath10k *ar, static void ath10k_msa_dump_memory(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data) struct ath10k_fw_crash_data *crash_data)
{ {
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
const struct ath10k_hw_mem_layout *mem_layout; const struct ath10k_hw_mem_layout *mem_layout;
const struct ath10k_mem_region *current_region; const struct ath10k_mem_region *current_region;
struct ath10k_dump_ram_data_hdr *hdr; struct ath10k_dump_ram_data_hdr *hdr;
@ -1419,15 +1420,15 @@ static void ath10k_msa_dump_memory(struct ath10k *ar,
buf_len -= sizeof(*hdr); buf_len -= sizeof(*hdr);
hdr->region_type = cpu_to_le32(current_region->type); hdr->region_type = cpu_to_le32(current_region->type);
hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va); hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size); hdr->length = cpu_to_le32(ar->msa.mem_size);
if (current_region->len < ar_snoc->qmi->msa_mem_size) { if (current_region->len < ar->msa.mem_size) {
memcpy(buf, ar_snoc->qmi->msa_va, current_region->len); memcpy(buf, ar->msa.vaddr, current_region->len);
ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n", ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
current_region->len, ar_snoc->qmi->msa_mem_size); current_region->len, ar->msa.mem_size);
} else { } else {
memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size); memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
} }
} }
@ -1455,6 +1456,155 @@ void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
mutex_unlock(&ar->dump_mutex); mutex_unlock(&ar->dump_mutex);
} }
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
struct device *dev = ar->dev;
struct device_node *node;
struct resource r;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
of_node_put(node);
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
ar->msa.mem_size,
MEMREMAP_WT);
if (IS_ERR(ar->msa.vaddr)) {
dev_err(dev, "failed to map memory region: %pa\n",
&r.start);
return PTR_ERR(ar->msa.vaddr);
}
} else {
ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
&ar->msa.paddr,
GFP_KERNEL);
if (!ar->msa.vaddr) {
ath10k_err(ar, "failed to allocate dma memory for msa region\n");
return -ENOMEM;
}
ar->msa.mem_size = msa_size;
}
ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
&ar->msa.paddr,
ar->msa.vaddr);
return 0;
}
static int ath10k_fw_init(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct device *host_dev = &ar_snoc->dev->dev;
struct platform_device_info info;
struct iommu_domain *iommu_dom;
struct platform_device *pdev;
struct device_node *node;
int ret;
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
if (!node) {
ar_snoc->use_tz = true;
return 0;
}
memset(&info, 0, sizeof(info));
info.fwnode = &node->fwnode;
info.parent = host_dev;
info.name = node->name;
info.dma_mask = DMA_BIT_MASK(32);
pdev = platform_device_register_full(&info);
if (IS_ERR(pdev)) {
of_node_put(node);
return PTR_ERR(pdev);
}
pdev->dev.of_node = node;
ret = of_dma_configure(&pdev->dev, node, true);
if (ret) {
ath10k_err(ar, "dma configure fail: %d\n", ret);
goto err_unregister;
}
ar_snoc->fw.dev = &pdev->dev;
iommu_dom = iommu_domain_alloc(&platform_bus_type);
if (!iommu_dom) {
ath10k_err(ar, "failed to allocate iommu domain\n");
ret = -ENOMEM;
goto err_unregister;
}
ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
if (ret) {
ath10k_err(ar, "could not attach device: %d\n", ret);
goto err_iommu_free;
}
ar_snoc->fw.iommu_domain = iommu_dom;
ar_snoc->fw.fw_start_addr = ar->msa.paddr;
ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
ar->msa.paddr, ar->msa.mem_size,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
ath10k_err(ar, "failed to map firmware region: %d\n", ret);
goto err_iommu_detach;
}
of_node_put(node);
return 0;
err_iommu_detach:
iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
err_iommu_free:
iommu_domain_free(iommu_dom);
err_unregister:
platform_device_unregister(pdev);
of_node_put(node);
return ret;
}
static int ath10k_fw_deinit(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
struct iommu_domain *iommu;
size_t unmapped_size;
if (ar_snoc->use_tz)
return 0;
iommu = ar_snoc->fw.iommu_domain;
unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
mapped_size);
if (unmapped_size != mapped_size)
ath10k_err(ar, "failed to unmap firmware: %zu\n",
unmapped_size);
iommu_detach_device(iommu, ar_snoc->fw.dev);
iommu_domain_free(iommu);
platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
return 0;
}
static const struct of_device_id ath10k_snoc_dt_match[] = { static const struct of_device_id ath10k_snoc_dt_match[] = {
{ .compatible = "qcom,wcn3990-wifi", { .compatible = "qcom,wcn3990-wifi",
.data = &drv_priv, .data = &drv_priv,
@ -1557,16 +1707,31 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq; goto err_free_irq;
} }
ret = ath10k_setup_msa_resources(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
goto err_power_off;
}
ret = ath10k_fw_init(ar);
if (ret) {
ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
goto err_power_off;
}
ret = ath10k_qmi_init(ar, msa_size); ret = ath10k_qmi_init(ar, msa_size);
if (ret) { if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret); ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
goto err_power_off; goto err_fw_deinit;
} }
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0; return 0;
err_fw_deinit:
ath10k_fw_deinit(ar);
err_power_off: err_power_off:
ath10k_hw_power_off(ar); ath10k_hw_power_off(ar);
@ -1598,6 +1763,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_core_unregister(ar); ath10k_core_unregister(ar);
ath10k_hw_power_off(ar); ath10k_hw_power_off(ar);
ath10k_fw_deinit(ar);
ath10k_snoc_free_irq(ar); ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar); ath10k_snoc_release_resource(ar);
ath10k_qmi_deinit(ar); ath10k_qmi_deinit(ar);

View File

@ -55,6 +55,13 @@ struct regulator_bulk_data;
struct ath10k_snoc { struct ath10k_snoc {
struct platform_device *dev; struct platform_device *dev;
struct ath10k *ar; struct ath10k *ar;
unsigned int use_tz;
struct ath10k_firmware {
struct device *dev;
dma_addr_t fw_start_addr;
struct iommu_domain *iommu_domain;
size_t mapped_mem_size;
} fw;
void __iomem *mem; void __iomem *mem;
dma_addr_t mem_pa; dma_addr_t mem_pa;
struct ath10k_snoc_target_info target_info; struct ath10k_snoc_target_info target_info;

View File

@ -693,17 +693,6 @@ static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id,
return 0; return 0;
} }
/* This op is currently only used by htc_wait_target if the HTC ready
* message times out. It is not applicable for USB since there is nothing
* we can do if the HTC ready message does not arrive in time.
* TODO: Make this op non mandatory by introducing a NULL check in the
* hif op wrapper.
*/
static void ath10k_usb_hif_send_complete_check(struct ath10k *ar,
u8 pipe, int force)
{
}
static int ath10k_usb_hif_power_up(struct ath10k *ar, static int ath10k_usb_hif_power_up(struct ath10k *ar,
enum ath10k_firmware_mode fw_mode) enum ath10k_firmware_mode fw_mode)
{ {
@ -737,7 +726,6 @@ static const struct ath10k_hif_ops ath10k_usb_hif_ops = {
.stop = ath10k_usb_hif_stop, .stop = ath10k_usb_hif_stop,
.map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe, .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe,
.get_default_pipe = ath10k_usb_hif_get_default_pipe, .get_default_pipe = ath10k_usb_hif_get_default_pipe,
.send_complete_check = ath10k_usb_hif_send_complete_check,
.get_free_queue_number = ath10k_usb_hif_get_free_queue_number, .get_free_queue_number = ath10k_usb_hif_get_free_queue_number,
.power_up = ath10k_usb_hif_power_up, .power_up = ath10k_usb_hif_power_up,
.power_down = ath10k_usb_hif_power_down, .power_down = ath10k_usb_hif_power_down,

View File

@ -126,6 +126,13 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar, struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
const struct wmi_wmm_params_all_arg *arg); const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask); struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
u32 vdev_id,
enum
wmi_peer_stats_info_request_type
type,
u8 *addr,
u32 reset);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar, struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type, enum wmi_force_fw_hang_type type,
u32 delay_ms); u32 delay_ms);
@ -1064,6 +1071,29 @@ ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
} }
static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
u32 vdev_id,
enum wmi_peer_stats_info_request_type type,
u8 *addr,
u32 reset)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_request_peer_stats_info)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
vdev_id,
type,
addr,
reset);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
}
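For orientation, a hypothetical caller of the new wrapper (arvif and sta are the usual mac80211-side handles; the real call sites land later in the series):

	int ret;

	ret = ath10k_wmi_request_peer_stats_info(ar, arvif->vdev_id,
						 WMI_REQUEST_ONE_PEER_STATS_INFO,
						 sta->addr, 0);
	if (ret)
		ath10k_warn(ar, "failed to request peer stats info: %d\n", ret);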
static inline int static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar, ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms) enum wmi_force_fw_hang_type type, u32 delay_ms)

View File

@ -219,6 +219,91 @@ static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
complete(&ar->vdev_delete_done); complete(&ar->vdev_delete_done);
} }
static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
const void *ptr, void *data)
{
const struct wmi_tlv_peer_stats_info *stat = ptr;
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
return -EPROTO;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
stat->peer_macaddr.addr,
__le32_to_cpu(stat->last_rx_rate_code),
__le32_to_cpu(stat->last_rx_bitrate_kbps));
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
__le32_to_cpu(stat->last_tx_rate_code),
__le32_to_cpu(stat->last_tx_bitrate_kbps));
sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
if (!sta) {
ath10k_warn(ar, "not found station for peer stats\n");
return -EINVAL;
}
arsta = (struct ath10k_sta *)sta->drv_priv;
arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
return 0;
}
static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_tlv_peer_stats_info_ev *ev;
const void *data;
u32 num_peer_stats;
int ret;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
if (!ev || !data) {
kfree(tb);
return -EPROTO;
}
num_peer_stats = __le32_to_cpu(ev->num_peers);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
__le32_to_cpu(ev->vdev_id),
num_peer_stats,
__le32_to_cpu(ev->more_data));
ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
ath10k_wmi_tlv_parse_peer_stats_info, NULL);
if (ret)
ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
kfree(tb);
return 0;
}
static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
complete(&ar->peer_stats_info_complete);
}
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar, static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -576,6 +661,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_UPDATE_STATS_EVENTID: case WMI_TLV_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb); ath10k_wmi_event_update_stats(ar, skb);
break; break;
case WMI_TLV_PEER_STATS_INFO_EVENTID:
ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
break;
case WMI_TLV_VDEV_START_RESP_EVENTID: case WMI_TLV_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb); ath10k_wmi_event_vdev_start_resp(ar, skb);
break; break;
@ -2897,6 +2985,36 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
return skb; return skb;
} }
static struct sk_buff *
ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
u32 vdev_id,
enum wmi_peer_stats_info_request_type type,
u8 *addr,
u32 reset)
{
struct wmi_tlv_request_peer_stats_info *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
tlv = (void *)skb->data;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->request_type = __cpu_to_le32(type);
if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
ether_addr_copy(cmd->peer_macaddr.addr, addr);
cmd->reset_after_request = reset;
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
return skb;
}
static int static int
ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
dma_addr_t paddr) dma_addr_t paddr)
@ -4113,6 +4231,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID, .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID, .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID, .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID, .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
.network_list_offload_config_cmdid = .network_list_offload_config_cmdid =
WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
@ -4269,6 +4388,7 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG, .rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE, .rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
}; };
static struct wmi_peer_param_map wmi_tlv_peer_param_map = { static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
@ -4416,6 +4536,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma, .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm, .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
/* .gen_mgmt_tx = not implemented; HTT is used */ /* .gen_mgmt_tx = not implemented; HTT is used */
.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,

View File

@ -198,6 +198,12 @@ enum wmi_tlv_cmd_id {
WMI_TLV_REQUEST_LINK_STATS_CMDID, WMI_TLV_REQUEST_LINK_STATS_CMDID,
WMI_TLV_START_LINK_STATS_CMDID, WMI_TLV_START_LINK_STATS_CMDID,
WMI_TLV_CLEAR_LINK_STATS_CMDID, WMI_TLV_CLEAR_LINK_STATS_CMDID,
WMI_TLV_GET_FW_MEM_DUMP_CMDID,
WMI_TLV_DEBUG_MESG_FLUSH_CMDID,
WMI_TLV_DIAG_EVENT_LOG_CONFIG_CMDID,
WMI_TLV_REQUEST_WLAN_STATS_CMDID,
WMI_TLV_REQUEST_RCPI_CMDID,
WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL), WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID, WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID, WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@ -338,6 +344,13 @@ enum wmi_tlv_event_id {
WMI_TLV_IFACE_LINK_STATS_EVENTID, WMI_TLV_IFACE_LINK_STATS_EVENTID,
WMI_TLV_PEER_LINK_STATS_EVENTID, WMI_TLV_PEER_LINK_STATS_EVENTID,
WMI_TLV_RADIO_LINK_STATS_EVENTID, WMI_TLV_RADIO_LINK_STATS_EVENTID,
WMI_TLV_UPDATE_FW_MEM_DUMP_EVENTID,
WMI_TLV_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
WMI_TLV_INST_RSSI_STATS_EVENTID,
WMI_TLV_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
WMI_TLV_REPORT_STATS_EVENTID,
WMI_TLV_UPDATE_RCPI_EVENTID,
WMI_TLV_PEER_STATS_INFO_EVENTID,
WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL), WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
WMI_TLV_NLO_SCAN_COMPLETE_EVENTID, WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
WMI_TLV_APFIND_EVENTID, WMI_TLV_APFIND_EVENTID,
@ -451,6 +464,7 @@ enum wmi_tlv_pdev_param {
WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD, WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE, WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR, WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE = 0x8b,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX, WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
}; };
@ -2081,6 +2095,94 @@ struct wmi_tlv_stats_ev {
__le32 num_peer_stats_extd; __le32 num_peer_stats_extd;
} __packed; } __packed;
struct wmi_tlv_peer_stats_info_ev {
__le32 vdev_id;
__le32 num_peers;
__le32 more_data;
} __packed;
#define WMI_TLV_MAX_CHAINS 8
struct wmi_tlv_peer_stats_info {
struct wmi_mac_addr peer_macaddr;
struct {
/* lower 32 bits of the tx_bytes value */
__le32 low_32;
/* upper 32 bits of the tx_bytes value */
__le32 high_32;
} __packed tx_bytes;
struct {
/* lower 32 bits of the tx_packets value */
__le32 low_32;
/* upper 32 bits of the tx_packets value */
__le32 high_32;
} __packed tx_packets;
struct {
/* lower 32 bits of the rx_bytes value */
__le32 low_32;
/* upper 32 bits of the rx_bytes value */
__le32 high_32;
} __packed rx_bytes;
struct {
/* lower 32 bits of the rx_packets value */
__le32 low_32;
/* upper 32 bits of the rx_packets value */
__le32 high_32;
} __packed rx_packets;
__le32 tx_retries;
__le32 tx_failed;
/* rate information, it is output of WMI_ASSEMBLE_RATECODE_V1
* (in format of 0x1000RRRR)
* The rate-code is a 4-bytes field in which,
* for given rate, nss and preamble
*
* b'31-b'29 unused / reserved
* b'28 indicate the version of rate-code (1 = RATECODE_V1)
* b'27-b'11 unused / reserved
* b'10-b'8 indicate the preamble (0 OFDM, 1 CCK, 2 HT, 3 VHT)
* b'7-b'5 indicate the NSS (0 - 1x1, 1 - 2x2, 2 - 3x3, 3 - 4x4)
* b'4-b'0 indicate the rate, which is indicated as follows:
* OFDM : 0: OFDM 48 Mbps
* 1: OFDM 24 Mbps
* 2: OFDM 12 Mbps
* 3: OFDM 6 Mbps
* 4: OFDM 54 Mbps
* 5: OFDM 36 Mbps
* 6: OFDM 18 Mbps
* 7: OFDM 9 Mbps
* CCK (pream == 1)
* 0: CCK 11 Mbps Long
* 1: CCK 5.5 Mbps Long
* 2: CCK 2 Mbps Long
* 3: CCK 1 Mbps Long
* 4: CCK 11 Mbps Short
* 5: CCK 5.5 Mbps Short
* 6: CCK 2 Mbps Short
* HT/VHT (pream == 2/3)
* 0..7: MCS0..MCS7 (HT)
* 0..9: MCS0..MCS9 (11AC VHT)
* 0..11: MCS0..MCS11 (11AX VHT)
* rate-code of the last transmission
*/
__le32 last_tx_rate_code;
__le32 last_rx_rate_code;
__le32 last_tx_bitrate_kbps;
__le32 last_rx_bitrate_kbps;
__le32 peer_rssi;
__le32 tx_succeed;
__le32 peer_rssi_per_chain[WMI_TLV_MAX_CHAINS];
} __packed;
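WMI TLV fields are 32-bit wide, hence the split low_32/high_32 counter pairs above. A minimal host-side sketch for reassembling them (helper name is ours, not from the patch):

	static u64 wmi_tlv_stats_to_u64(__le32 low_32, __le32 high_32)
	{
		return ((u64)__le32_to_cpu(high_32) << 32) |
		       __le32_to_cpu(low_32);
	}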
#define HW_RATECODE_PREAM_V1_MASK GENMASK(10, 8)
#define WMI_TLV_GET_HW_RC_PREAM_V1(rc) FIELD_GET(HW_RATECODE_PREAM_V1_MASK, rc)
#define HW_RATECODE_NSS_V1_MASK GENMASK(7, 5)
#define WMI_TLV_GET_HW_RC_NSS_V1(rc) FIELD_GET(HW_RATECODE_NSS_V1_MASK, rc)
#define HW_RATECODE_RATE_V1_MASK GENMASK(4, 0)
#define WMI_TLV_GET_HW_RC_RATE_V1(rc) FIELD_GET(HW_RATECODE_RATE_V1_MASK, rc)
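As a sketch of the masks above in use, decoding a RATECODE_V1 word taken from the event parser earlier in this patch (variable names illustrative):

	u32 rc = __le32_to_cpu(stat->last_rx_rate_code);

	u8 pream = WMI_TLV_GET_HW_RC_PREAM_V1(rc); /* 0 OFDM, 1 CCK, 2 HT, 3 VHT */
	u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rc);	   /* 0-based, so 1 means 2x2 */
	u8 rate = WMI_TLV_GET_HW_RC_RATE_V1(rc);   /* rate/MCS index per the comment above */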
struct wmi_tlv_p2p_noa_ev { struct wmi_tlv_p2p_noa_ev {
__le32 vdev_id; __le32 vdev_id;
} __packed; } __packed;
@ -2097,6 +2199,14 @@ struct wmi_tlv_wow_add_del_event_cmd {
__le32 event_bitmap; __le32 event_bitmap;
} __packed; } __packed;
struct wmi_tlv_request_peer_stats_info {
__le32 request_type;
__le32 vdev_id;
/* peer MAC address */
struct wmi_mac_addr peer_macaddr;
__le32 reset_after_request;
} __packed;
/* Command to set/unset chip in quiet mode */ /* Command to set/unset chip in quiet mode */
struct wmi_tlv_set_quiet_cmd { struct wmi_tlv_set_quiet_cmd {
__le32 vdev_id; __le32 vdev_id;

View File

@ -8336,7 +8336,7 @@ ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to stack", pdev->loc_mpdus); "MPDUs delivered to stack", pdev->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Oversized AMSUs", pdev->oversize_amsdu); "Oversized AMSDUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors", pdev->phy_errs); "PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",

View File

@ -940,6 +940,7 @@ struct wmi_cmd_map {
u32 vdev_spectral_scan_configure_cmdid; u32 vdev_spectral_scan_configure_cmdid;
u32 vdev_spectral_scan_enable_cmdid; u32 vdev_spectral_scan_enable_cmdid;
u32 request_stats_cmdid; u32 request_stats_cmdid;
u32 request_peer_stats_info_cmdid;
u32 set_arp_ns_offload_cmdid; u32 set_arp_ns_offload_cmdid;
u32 network_list_offload_config_cmdid; u32 network_list_offload_config_cmdid;
u32 gtk_offload_cmdid; u32 gtk_offload_cmdid;
@ -3798,6 +3799,7 @@ struct wmi_pdev_param_map {
u32 enable_btcoex; u32 enable_btcoex;
u32 rfkill_config; u32 rfkill_config;
u32 rfkill_enable; u32 rfkill_enable;
u32 peer_stats_info_enable;
}; };
#define WMI_PDEV_PARAM_UNSUPPORTED 0 #define WMI_PDEV_PARAM_UNSUPPORTED 0
@ -4578,6 +4580,13 @@ struct wmi_request_stats_cmd {
struct wlan_inst_rssi_args inst_rssi_args; struct wlan_inst_rssi_args inst_rssi_args;
} __packed; } __packed;
enum wmi_peer_stats_info_request_type {
/* request stats of one specified peer */
WMI_REQUEST_ONE_PEER_STATS_INFO = 0x01,
/* request stats of all peers belong to specified VDEV */
WMI_REQUEST_VDEV_ALL_PEER_STATS_INFO = 0x02,
};
/* Suspend option */ /* Suspend option */
enum { enum {
/* suspend */ /* suspend */

View File

@ -60,9 +60,14 @@ static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
WME_AC_VO); WME_AC_VO);
} }
enum ath11k_skb_flags {
ATH11K_SKB_HW_80211_ENCAP = BIT(0),
};
struct ath11k_skb_cb { struct ath11k_skb_cb {
dma_addr_t paddr; dma_addr_t paddr;
u8 eid; u8 eid;
u8 flags;
struct ath11k *ar; struct ath11k *ar;
struct ieee80211_vif *vif; struct ieee80211_vif *vif;
} __packed; } __packed;
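A sketch of how the new flag is meant to be set on the tx path; this mirrors the mac.c change elsewhere in the series rather than quoting it:

	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
		skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;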
@ -392,6 +397,7 @@ struct ath11k_debug {
u32 pktlog_mode; u32 pktlog_mode;
u32 pktlog_peer_valid; u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN]; u8 pktlog_peer_addr[ETH_ALEN];
u32 rx_filter;
}; };
struct ath11k_per_peer_tx_stats { struct ath11k_per_peer_tx_stats {
@ -656,6 +662,9 @@ struct ath11k_base {
u32 fw_crash_counter; u32 fw_crash_counter;
} stats; } stats;
u32 pktlog_defs_checksum; u32 pktlog_defs_checksum;
/* Round robin based TCL ring selector */
atomic_t tcl_ring_selector;
}; };
struct ath11k_fw_stats_pdev { struct ath11k_fw_stats_pdev {

View File

@ -195,7 +195,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
total_vdevs_started += ar->num_started_vdevs; total_vdevs_started += ar->num_started_vdevs;
} }
is_end = ((++num_vdev) == total_vdevs_started ? true : false); is_end = ((++num_vdev) == total_vdevs_started);
list_splice_tail_init(&stats.vdevs, list_splice_tail_init(&stats.vdevs,
&ar->debug.fw_stats.vdevs); &ar->debug.fw_stats.vdevs);
@ -215,7 +215,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
/* Mark end until we reach the count of all started VDEVs /* Mark end until we reach the count of all started VDEVs
* within the PDEV * within the PDEV
*/ */
is_end = ((++num_bcn) == ar->num_started_vdevs ? true : false); is_end = ((++num_bcn) == ar->num_started_vdevs);
list_splice_tail_init(&stats.bcn, list_splice_tail_init(&stats.bcn,
&ar->debug.fw_stats.bcn); &ar->debug.fw_stats.bcn);
@ -698,6 +698,8 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
tlv_filter = ath11k_mac_mon_status_filter_default; tlv_filter = ath11k_mac_mon_status_filter_default;
} }
ar->debug.rx_filter = tlv_filter.rx_filter;
ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id; ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS, HAL_RXDMA_MONITOR_STATUS,
@ -803,6 +805,9 @@ static const struct file_operations fops_soc_rx_stats = {
int ath11k_debug_pdev_create(struct ath11k_base *ab) int ath11k_debug_pdev_create(struct ath11k_base *ab)
{ {
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k); ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
if (IS_ERR_OR_NULL(ab->debugfs_soc)) { if (IS_ERR_OR_NULL(ab->debugfs_soc)) {

View File

@ -67,7 +67,7 @@ struct debug_htt_stats_req {
u8 peer_addr[ETH_ALEN]; u8 peer_addr[ETH_ALEN];
struct completion cmpln; struct completion cmpln;
u32 buf_len; u32 buf_len;
u8 buf[0]; u8 buf[];
}; };
struct ath_pktlog_hdr { struct ath_pktlog_hdr {
@ -77,9 +77,11 @@ struct ath_pktlog_hdr {
u16 size; u16 size;
u32 timestamp; u32 timestamp;
u32 type_specific_data; u32 type_specific_data;
u8 payload[0]; u8 payload[];
}; };
#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512) #define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024) #define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
@ -188,6 +190,11 @@ static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
return ar->debug.extd_rx_stats; return ar->debug.extd_rx_stats;
} }
static inline int ath11k_debug_rx_filter(struct ath11k *ar)
{
return ar->debug.rx_filter;
}
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir); struct ieee80211_sta *sta, struct dentry *dir);
void void
@ -269,6 +276,11 @@ static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr
return false; return false;
} }
static inline int ath11k_debug_rx_filter(struct ath11k *ar)
{
return 0;
}
static inline void static inline void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats, struct ath11k_per_peer_tx_stats *peer_stats,

View File

@ -239,7 +239,7 @@ struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
*/ */
struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v { struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size; u32 hist_bin_size;
u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
}; };
/* == SOC ERROR STATS == */ /* == SOC ERROR STATS == */
@ -550,7 +550,7 @@ struct htt_tx_hwq_stats_cmn_tlv {
struct htt_tx_hwq_difs_latency_stats_tlv_v { struct htt_tx_hwq_difs_latency_stats_tlv_v {
u32 hist_intvl; u32 hist_intvl;
/* histogram of ppdu post to hwsch - > cmd status received */ /* histogram of ppdu post to hwsch - > cmd status received */
u32 difs_latency_hist[0]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */ u32 difs_latency_hist[]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
}; };
/* NOTE: Variable length TLV, use length spec to infer array size */ /* NOTE: Variable length TLV, use length spec to infer array size */
@ -586,7 +586,7 @@ struct htt_tx_hwq_fes_result_stats_tlv_v {
struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v { struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size; u32 hist_bin_size;
/* Histogram of number of mpdus on tried mpdu */ /* Histogram of number of mpdus on tried mpdu */
u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
}; };
/* NOTE: Variable length TLV, use length spec to infer array size /* NOTE: Variable length TLV, use length spec to infer array size
@ -1584,7 +1584,7 @@ struct htt_pdev_stats_twt_session_tlv {
struct htt_pdev_stats_twt_sessions_tlv { struct htt_pdev_stats_twt_sessions_tlv {
u32 pdev_id; u32 pdev_id;
u32 num_sessions; u32 num_sessions;
struct htt_pdev_stats_twt_session_tlv twt_session[0]; struct htt_pdev_stats_twt_session_tlv twt_session[];
}; };
enum htt_rx_reo_resource_sample_id_enum { enum htt_rx_reo_resource_sample_id_enum {

View File

@ -8,6 +8,8 @@
#include "core.h" #include "core.h"
#include "peer.h" #include "peer.h"
#include "debug.h" #include "debug.h"
#include "dp_tx.h"
#include "debug_htt_stats.h"
void void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
@ -435,13 +437,22 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
return 0; return 0;
out: out:
vfree(stats_req); vfree(stats_req);
ar->debug.htt_stats.stats_req = NULL;
return ret; return ret;
} }
static int static int
ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file) ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
{ {
struct ieee80211_sta *sta = inode->i_private;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
mutex_lock(&ar->conf_mutex);
vfree(file->private_data); vfree(file->private_data);
ar->debug.htt_stats.stats_req = NULL;
mutex_unlock(&ar->conf_mutex);
return 0; return 0;
} }
@ -749,6 +760,66 @@ static const struct file_operations fops_aggr_mode = {
.llseek = default_llseek, .llseek = default_llseek,
}; };
static ssize_t
ath11k_write_htt_peer_stats_reset(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct htt_ext_stats_cfg_params cfg_params = { 0 };
int ret;
u8 type;
ret = kstrtou8_from_user(user_buf, count, 0, &type);
if (ret)
return ret;
if (!type)
return ret;
mutex_lock(&ar->conf_mutex);
cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
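/* peer MAC goes into cfg2 (octets 0-3) and cfg3 (octets 4-5), LSB first */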
cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), sta->addr[1]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]);
cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]);
cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]);
cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET;
ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
ATH11K_DBG_HTT_EXT_STATS_PEER_INFO,
&cfg_params,
0ULL);
if (ret) {
ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
return ret;
}
mutex_unlock(&ar->conf_mutex);
ret = count;
return ret;
}
static const struct file_operations fops_htt_peer_stats_reset = {
.write = ath11k_write_htt_peer_stats_reset,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir) struct ieee80211_sta *sta, struct dentry *dir)
{ {
@ -771,4 +842,9 @@ void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("addba", 0200, dir, sta, &fops_addba); debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp); debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", 0200, dir, sta, &fops_delba); debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
ar->ab->wmi_ab.svc_map))
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
} }
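Assuming the standard per-station debugfs layout that mac80211 creates, resetting a peer's HTT stats then reduces to (path illustrative):

	echo 1 > /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/stations/<peer MAC>/htt_peer_stats_reset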

View File

@ -880,6 +880,8 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list); INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
spin_lock_init(&dp->reo_cmd_lock); spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc); ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) { if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret); ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
@ -909,8 +911,10 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
dp->tx_ring[i].tx_status_head = 0; dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1; dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL); dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
if (!dp->tx_ring[i].tx_status) if (!dp->tx_ring[i].tx_status) {
ret = -ENOMEM;
goto fail_cmn_srng_cleanup; goto fail_cmn_srng_cleanup;
}
} }
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++) for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)

View File

@ -36,6 +36,7 @@ struct dp_rx_tid {
struct ath11k_base *ab; struct ath11k_base *ab;
}; };
#define DP_REO_DESC_FREE_THRESHOLD 64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000 #define DP_REO_DESC_FREE_TIMEOUT_MS 1000
struct dp_reo_cache_flush_elem { struct dp_reo_cache_flush_elem {
@ -222,7 +223,13 @@ struct ath11k_dp {
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX]; struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list; struct list_head reo_cmd_list;
struct list_head reo_cmd_cache_flush_list; struct list_head reo_cmd_cache_flush_list;
/* protects access to reo_cmd_list and reo_cmd_cache_flush_list */ u32 reo_cmd_cache_flush_count;
/* protects access to the fields below:
* - reo_cmd_list
* - reo_cmd_cache_flush_list
* - reo_cmd_cache_flush_count
*/
spinlock_t reo_cmd_lock; spinlock_t reo_cmd_lock;
}; };

View File

@ -252,7 +252,7 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
__le32_to_cpu(rx_desc->mpdu_start_tag)); __le32_to_cpu(rx_desc->mpdu_start_tag));
return tlv_tag == HAL_RX_MPDU_START ? true : false; return tlv_tag == HAL_RX_MPDU_START;
} }
static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
@ -565,6 +565,7 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd_cache, tmp_cache, list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) { &dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list); list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.paddr, dma_unmap_single(ab->dev, cmd_cache->data.paddr,
cmd_cache->data.size, DMA_BIDIRECTIONAL); cmd_cache->data.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.vaddr); kfree(cmd_cache->data.vaddr);
@ -651,15 +652,18 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
spin_lock_bh(&dp->reo_cmd_lock); spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
spin_unlock_bh(&dp->reo_cmd_lock); spin_unlock_bh(&dp->reo_cmd_lock);
/* Flush and invalidate aged REO desc from HW cache */ /* Flush and invalidate aged REO desc from HW cache */
spin_lock_bh(&dp->reo_cmd_lock); spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) { list) {
if (time_after(jiffies, elem->ts + if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
time_after(jiffies, elem->ts +
msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
list_del(&elem->list); list_del(&elem->list);
dp->reo_cmd_cache_flush_count--;
spin_unlock_bh(&dp->reo_cmd_lock); spin_unlock_bh(&dp->reo_cmd_lock);
ath11k_dp_reo_cache_flush(ab, &elem->data); ath11k_dp_reo_cache_flush(ab, &elem->data);
@ -892,7 +896,7 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL); vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) { if (!vaddr) {
spin_unlock_bh(&ab->base_lock); spin_unlock_bh(&ab->base_lock);
return -ENOMEM; return -ENOMEM;
@@ -2266,6 +2270,7 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
     struct ieee80211_hdr *hdr;
     struct sk_buff *last_buf;
     u8 l3_pad_bytes;
+    u8 *hdr_status;
     u16 msdu_len;
     int ret;
@@ -2294,8 +2299,13 @@
         skb_pull(msdu, HAL_RX_DESC_SIZE);
     } else if (!rxcb->is_continuation) {
         if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+            hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
             ret = -EINVAL;
             ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
+            ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+                    sizeof(struct ieee80211_hdr));
+            ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+                    sizeof(struct hal_rx_desc));
             goto free_out;
         }
         skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
@@ -2961,8 +2971,8 @@ static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer
     return 0;
 
 mic_fail:
-    (ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
-    (ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
+    (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
+    (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
 
     rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
             RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
@@ -3390,6 +3400,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
     struct sk_buff *msdu;
     struct ath11k_skb_rxcb *rxcb;
     struct hal_rx_desc *rx_desc;
+    u8 *hdr_status;
     u16 msdu_len;
 
     spin_lock_bh(&rx_ring->idr_lock);
@@ -3427,6 +3438,17 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
     rx_desc = (struct hal_rx_desc *)msdu->data;
     msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+    if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+        hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+        ath11k_warn(ar->ab, "invalid msdu len %u", msdu_len);
+        ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+                sizeof(struct ieee80211_hdr));
+        ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+                sizeof(struct hal_rx_desc));
+        dev_kfree_skb_any(msdu);
+        goto exit;
+    }
     skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
 
     if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
@@ -9,14 +9,14 @@
 #include "hw.h"
 #include "peer.h"
 
-/* NOTE: Any of the mapped ring id value must not exceed DP_TCL_NUM_RING_MAX */
-static const u8
-ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
-
 static enum hal_tcl_encap_type
 ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
 {
-    /* TODO: Determine encap type based on vif_type and configuration */
+    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+    if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+        return HAL_TCL_ENCAP_TYPE_ETHERNET;
+
     return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
 }
@@ -40,8 +40,11 @@ static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
 static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
 {
     struct ieee80211_hdr *hdr = (void *)skb->data;
+    struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
 
-    if (!ieee80211_is_data_qos(hdr->frame_control))
+    if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
+        return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+    else if (!ieee80211_is_data_qos(hdr->frame_control))
         return HAL_DESC_REO_NON_QOS_TID;
     else
         return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
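
With hardware 802.11 encapsulation there is no QoS control field in the frame to parse, so the TID has to come from skb->priority; only non-QoS data keeps falling back to the dedicated REO TID. A compact model of the selection (the constants are illustrative stand-ins for the mac80211/ath11k definitions):

    #include <stdbool.h>

    #define QOS_CTL_TID_MASK 0xf /* stand-in for IEEE80211_QOS_CTL_TID_MASK */
    #define NON_QOS_TID      16  /* stand-in for HAL_DESC_REO_NON_QOS_TID */

    static unsigned int pick_tid(bool hw_encap, bool is_qos_data,
                                 unsigned int skb_priority)
    {
        if (hw_encap)             /* no 802.11 header to inspect */
            return skb_priority & QOS_CTL_TID_MASK;
        if (!is_qos_data)         /* non-QoS frames share one REO TID */
            return NON_QOS_TID;
        return skb_priority & QOS_CTL_TID_MASK;
    }
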
@@ -84,15 +87,31 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
     u8 pool_id;
     u8 hal_ring_id;
     int ret;
+    u8 ring_selector = 0, ring_map = 0;
+    bool tcl_ring_retry;
 
     if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
         return -ESHUTDOWN;
 
-    if (!ieee80211_is_data(hdr->frame_control))
+    if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+        !ieee80211_is_data(hdr->frame_control))
         return -ENOTSUPP;
 
     pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
-    ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
+
+    /* Let the default ring selection be based on a round robin
+     * fashion where one of the 3 tcl rings are selected based on
+     * the tcl_ring_selector counter. In case that ring
+     * is full/busy, we resort to other available rings.
+     * If all rings are full, we drop the packet.
+     * //TODO Add throttling logic when all rings are full
+     */
+    ring_selector = atomic_inc_return(&ab->tcl_ring_selector);
+
+tcl_ring_sel:
+    tcl_ring_retry = false;
+    ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+    ring_map |= BIT(ti.ring_id);
+
     tx_ring = &dp->tx_ring[ti.ring_id];
@@ -101,8 +120,14 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
                DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
     spin_unlock_bh(&tx_ring->tx_idr_lock);
 
-    if (ret < 0)
-        return -ENOSPC;
+    if (ret < 0) {
+        if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1))
+            return -ENOSPC;
+
+        /* Check if the next ring is available */
+        ring_selector++;
+        goto tcl_ring_sel;
+    }
 
     ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
              FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
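
Taken together, the hunks above implement round-robin TCL ring selection with fallback: a global counter picks the starting ring, ring_map records which rings were already tried, and the packet is only dropped once the map covers all DP_TCL_NUM_RING_MAX rings. The control flow in isolation, with a hypothetical ring_has_room() callback standing in for the IDR and TCL descriptor checks:

    #include <stdbool.h>

    #define NUM_RINGS 3 /* stand-in for DP_TCL_NUM_RING_MAX */

    static int pick_ring(unsigned int counter, bool (*ring_has_room)(int))
    {
        unsigned int tried = 0; /* mirrors ring_map |= BIT(ti.ring_id) */
        int ring;

        while (tried != (1u << NUM_RINGS) - 1) {
            ring = counter % NUM_RINGS;
            tried |= 1u << ring;
            if (ring_has_room(ring))
                return ring; /* enqueue on this ring */
            counter++;       /* resort to the next ring */
        }
        return -1;           /* all rings full: drop the packet */
    }
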
@@ -149,7 +174,10 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
      * skb_checksum_help() is needed
      */
     case HAL_TCL_ENCAP_TYPE_ETHERNET:
+        /* no need to encap */
+        break;
     case HAL_TCL_ENCAP_TYPE_802_3:
+    default:
         /* TODO: Take care of other encap modes as well */
         ret = -EINVAL;
         goto fail_remove_idr;
@@ -178,11 +206,21 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
     if (!hal_tcl_desc) {
         /* NOTE: It is highly unlikely we'll be running out of tcl_ring
          * desc because the desc is directly enqueued onto hw queue.
-         * So add tx packet throttling logic in future if required.
          */
         ath11k_hal_srng_access_end(ab, tcl_ring);
         spin_unlock_bh(&tcl_ring->lock);
         ret = -ENOMEM;
+
+        /* Checking for available tcl descriptors in another ring in
+         * case of failure due to full tcl ring now, is better than
+         * checking this ring earlier for each pkt tx.
+         * Restart ring selection if some rings are not checked yet.
+         */
+        if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+            tcl_ring_retry = true;
+            ring_selector++;
+        }
+
         goto fail_unmap_dma;
     }
@@ -206,6 +244,9 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
              FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
     spin_unlock_bh(&tx_ring->tx_idr_lock);
 
+    if (tcl_ring_retry)
+        goto tcl_ring_sel;
+
     return ret;
 }
@@ -543,8 +584,12 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
     cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
     cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
 
+    /* cmd_num should start from 1, during failure return the error code */
+    if (cmd_num < 0)
+        return cmd_num;
+
     /* reo cmd ring descriptors has cmd_num starting from 1 */
-    if (cmd_num <= 0)
+    if (cmd_num == 0)
         return -EINVAL;
 
     if (!cb)
@@ -477,7 +477,7 @@ enum hal_tlv_tag {
 struct hal_tlv_hdr {
     u32 tl;
-    u8 value[0];
+    u8 value[];
 } __packed;
 
 #define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
 
@@ -1972,7 +1972,7 @@ struct hal_rx_reo_queue {
     u32 processed_total_bytes;
     u32 info5;
     u32 rsvd[3];
-    struct hal_rx_reo_queue_ext ext_desc[0];
+    struct hal_rx_reo_queue_ext ext_desc[];
 } __packed;
 
 /* hal_rx_reo_queue
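
This and several later hunks convert zero-length arrays (u8 value[0], a pre-C99 GNU extension) into C99 flexible array members (u8 value[]). The memory layout is identical, but the compiler can now diagnose misuse such as taking sizeof of the array. The usual allocation pattern, shown in plain userspace C (kernel code would typically pair kzalloc() with the struct_size() helper):

    #include <stdlib.h>
    #include <string.h>

    struct tlv {
        unsigned int tl;       /* tag/length word */
        unsigned char value[]; /* flexible array member, formerly value[0] */
    };

    static struct tlv *tlv_alloc(const void *payload, size_t len)
    {
        struct tlv *t = malloc(sizeof(*t) + len); /* header + trailing bytes */

        if (!t)
            return NULL;
        t->tl = (unsigned int)len;
        memcpy(t->value, payload, len); /* fill the trailing storage */
        return t;
    }
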
@@ -23,7 +23,7 @@ struct hal_rx_wbm_rel_info {
 struct hal_rx_mon_status_tlv_hdr {
     u32 hdr;
-    u8 value[0];
+    u8 value[];
 };
 
 enum hal_rx_su_mu_coding {
@@ -111,7 +111,7 @@ struct ath11k_hw_params {
 struct ath11k_fw_ie {
     __le32 id;
     __le32 len;
-    u8 data[0];
+    u8 data[];
 };
 
 enum ath11k_bd_ie_board_type {
@@ -33,6 +33,12 @@
     .max_power = 30, \
 }
 
+/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
+static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
+module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+         "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
 static const struct ieee80211_channel ath11k_2ghz_channels[] = {
     CHAN2G(1, 2412, 0),
     CHAN2G(2, 2417, 0),
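
Because the parameter is registered with permissions 0644, the datapath mode can be chosen at module load time, for example modprobe ath11k frame_mode=2 to request ethernet encapsulation offload, and read back from /sys/module/ath11k/parameters/frame_mode. The value is consulted when an interface is added (see the WMI_VDEV_PARAM_TX_ENCAP_TYPE hunk below), so changing it at runtime should only affect vdevs created afterwards.
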
@@ -1142,6 +1148,10 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
         arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
         arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
 
+    if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
+        IEEE80211_VHT_MCS_NOT_SUPPORTED)
+        arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
     /* TODO: Check */
     arg->tx_max_mcs_nss = 0xFF;
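
For context: a VHT MCS map packs a 2-bit code per spatial stream (0: MCS 0-7, 1: MCS 0-8, 2: MCS 0-9, 3: not supported), with NSS 1 in the two least significant bits. Since IEEE80211_VHT_MCS_NOT_SUPPORTED is the 2-bit value 3, the new check tests whether the peer advertises no usable MCS for its first stream before stripping the MU beamformee capability. The test in isolation (local define standing in for the mac80211 constant):

    #include <stdbool.h>

    #define VHT_MCS_NOT_SUPPORTED 0x3 /* 2-bit "no MCS" code per NSS */

    /* True when the NSS-1 slot (bits 1:0 of the 16-bit map) is unusable. */
    static bool vht_nss1_unsupported(unsigned short mcs_map)
    {
        return (mcs_map & VHT_MCS_NOT_SUPPORTED) == VHT_MCS_NOT_SUPPORTED;
    }
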
@@ -3682,10 +3692,10 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
 int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
 {
-    struct sk_buff *msdu = skb;
-    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
     struct ath11k *ar = ctx;
     struct ath11k_base *ab = ar->ab;
+    struct sk_buff *msdu = skb;
+    struct ieee80211_tx_info *info;
 
     spin_lock_bh(&ar->txmgmt_idr_lock);
     idr_remove(&ar->txmgmt_idr, buf_id);
@@ -3725,6 +3735,7 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
 {
     struct ath11k_base *ab = ar->ab;
     struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+    struct ieee80211_tx_info *info;
     dma_addr_t paddr;
     int buf_id;
     int ret;
@@ -3736,11 +3747,14 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
     if (buf_id < 0)
         return -ENOSPC;
 
-    if ((ieee80211_is_action(hdr->frame_control) ||
-         ieee80211_is_deauth(hdr->frame_control) ||
-         ieee80211_is_disassoc(hdr->frame_control)) &&
-         ieee80211_has_protected(hdr->frame_control)) {
-        skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+    info = IEEE80211_SKB_CB(skb);
+    if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)) {
+        if ((ieee80211_is_action(hdr->frame_control) ||
+             ieee80211_is_deauth(hdr->frame_control) ||
+             ieee80211_is_disassoc(hdr->frame_control)) &&
+             ieee80211_has_protected(hdr->frame_control)) {
+            skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+        }
     }
 
     paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -3789,15 +3803,30 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
     while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
         info = IEEE80211_SKB_CB(skb);
-        arvif = ath11k_vif_to_arvif(info->control.vif);
-
-        ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
-        if (ret) {
-            ath11k_warn(ar->ab, "failed to transmit management frame %d\n",
-                    ret);
+        if (!info->control.vif) {
+            ath11k_warn(ar->ab, "no vif found for mgmt frame, flags 0x%x\n",
+                    info->control.flags);
             ieee80211_free_txskb(ar->hw, skb);
+            continue;
+        }
+
+        arvif = ath11k_vif_to_arvif(info->control.vif);
+        if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
+            arvif->is_started) {
+            ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+            if (ret) {
+                ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
+                        arvif->vdev_id, ret);
+                ieee80211_free_txskb(ar->hw, skb);
+            } else {
+                atomic_inc(&ar->num_pending_mgmt_tx);
+            }
         } else {
-            atomic_inc(&ar->num_pending_mgmt_tx);
+            ath11k_warn(ar->ab,
+                    "dropping mgmt frame for vdev %d, flags 0x%x is_started %d\n",
+                    arvif->vdev_id, info->control.flags,
+                    arvif->is_started);
+            ieee80211_free_txskb(ar->hw, skb);
         }
     }
 }
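
The new guard consults a 64-bit vdev bitmap before handing the frame to firmware. Note the 1LL constant in the driver code: without the long long suffix the shift would be performed on a 32-bit int and misbehave for vdev ids of 32 and above. A standalone model of the test:

    #include <stdbool.h>
    #include <stdint.h>

    /* vdev_id indexes one bit of a 64-bit allocation map. */
    static bool vdev_alive(uint64_t allocated_vdev_map, unsigned int vdev_id,
                           bool is_started)
    {
        return (allocated_vdev_map & (1ULL << vdev_id)) && is_started;
    }
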
@@ -3837,6 +3866,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
                  struct ieee80211_tx_control *control,
                  struct sk_buff *skb)
 {
+    struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
     struct ath11k *ar = hw->priv;
     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
     struct ieee80211_vif *vif = info->control.vif;
@@ -3845,7 +3875,9 @@
     bool is_prb_rsp;
     int ret;
 
-    if (ieee80211_is_mgmt(hdr->frame_control)) {
+    if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+        skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
+    } else if (ieee80211_is_mgmt(hdr->frame_control)) {
         is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
         ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
         if (ret) {
@@ -3877,8 +3909,10 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
     struct htt_rx_ring_tlv_filter tlv_filter = {0};
     u32 ring_id;
 
-    if (enable)
+    if (enable) {
         tlv_filter = ath11k_mac_mon_status_filter_default;
+        tlv_filter.rx_filter = ath11k_debug_rx_filter(ar);
+    }
 
     ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
@@ -4124,6 +4158,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
     struct vdev_create_params vdev_param = {0};
     struct peer_create_params peer_param;
     u32 param_id, param_value;
+    int hw_encap = 0;
     u16 nss;
     int i;
     int ret;
@@ -4208,6 +4243,8 @@
     }
 
     ar->num_created_vdevs++;
+    ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
+           vif->addr, arvif->vdev_id);
     ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
     ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
@@ -4216,7 +4253,22 @@
     spin_unlock_bh(&ar->data_lock);
 
     param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
-    param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+    if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET)
+        switch (vif->type) {
+        case NL80211_IFTYPE_STATION:
+        case NL80211_IFTYPE_AP_VLAN:
+        case NL80211_IFTYPE_AP:
+            hw_encap = 1;
+            break;
+        default:
+            break;
+        }
+
+    if (ieee80211_set_hw_80211_encap(vif, hw_encap))
+        param_value = ATH11K_HW_TXRX_ETHERNET;
+    else
+        param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
     ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
                         param_id, param_value);
     if (ret) {
@@ -4378,6 +4430,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
                arvif->vdev_id, ret);
 
     ar->num_created_vdevs--;
+    ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+           vif->addr, arvif->vdev_id);
     ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
     ab->free_vdev_map |= 1LL << (arvif->vdev_id);
@@ -4643,6 +4697,8 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
     }
 
     ar->num_started_vdevs++;
+    ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+           arvif->vif->addr, arvif->vdev_id);
 
     /* Enable CAC Flag in the driver by checking the channel DFS cac time,
      * i.e dfs_cac_ms value which will be valid only for radar channels
@@ -4701,6 +4757,8 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
 
     WARN_ON(ar->num_started_vdevs == 0);
     ar->num_started_vdevs--;
+    ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+           arvif->vif->addr, arvif->vdev_id);
 
     if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
         clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
@@ -5891,6 +5949,9 @@ int ath11k_mac_register(struct ath11k_base *ab)
     int i;
     int ret;
 
+    if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+        return 0;
+
     for (i = 0; i < ab->num_radios; i++) {
         pdev = &ab->pdevs[i];
         ar = pdev->ar;
@@ -39,7 +39,7 @@ struct wmi_cmd_hdr {
 struct wmi_tlv {
     u32 header;
-    u8 value[0];
+    u8 value[];
 } __packed;
 
 #define WMI_TLV_LEN GENMASK(15, 0)
@@ -1976,6 +1976,43 @@ enum wmi_tlv_service {
     WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
     WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
     WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+    WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+    WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+    WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+    WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+    WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+    WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+    WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+    WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+    WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+    WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+    WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+    WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+    WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+    WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+    WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+    WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+    WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+    WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+    WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+    WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+    WMI_TLV_SERVICE_VOW_ENABLE = 197,
+    WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+    WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+    WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+    WMI_TLV_SERVICE_PS_TDCC = 201,
+    WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
+    WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+    WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+    WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+    WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+    WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+    WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+    WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+    WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+    WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+    WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+    WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
 
     WMI_MAX_EXT_SERVICE
@@ -4568,6 +4605,9 @@ enum wmi_sta_ps_param_rx_wake_policy {
     WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
 };
 
+/* Do not change existing values! Used by the ath11k_frame_mode
+ * module parameter.
+ */
 enum ath11k_hw_txrx_mode {
     ATH11K_HW_TXRX_RAW = 0,
     ATH11K_HW_TXRX_NATIVE_WIFI = 1,
@@ -501,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
     if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
         /* too many PHY errors - we have to raise immunity */
-        bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
+        bool ofdm_flag = as->ofdm_errors > ofdm_high;
 
         ath5k_ani_raise_immunity(ah, as, ofdm_flag);
         ath5k_ani_period_restart(as);
@@ -160,7 +160,7 @@ enum ath6kl_fw_capability {
 struct ath6kl_fw_ie {
     __le32 id;
     __le32 len;
-    u8 data[0];
+    u8 data[];
 };
 
 enum ath6kl_hw_flags {
 
@@ -406,7 +406,7 @@ struct ath6kl_mgmt_buff {
     u32 id;
     bool no_cck;
     size_t len;
-    u8 buf[0];
+    u8 buf[];
 };
 
 struct ath6kl_sta {
@@ -30,7 +30,7 @@ struct ath6kl_fwlog_slot {
     __le32 length;
 
     /* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
-    u8 payload[0];
+    u8 payload[];
 };
 
 #define ATH6KL_FWLOG_MAX_ENTRIES 20
@@ -199,7 +199,7 @@ struct hif_scatter_req {
     u32 scat_q_depth;
 
-    struct hif_scatter_item scat_list[0];
+    struct hif_scatter_item scat_list[];
 };
 
 struct ath6kl_irq_proc_registers {
@@ -19,6 +19,8 @@
 #include "ar9002_phy.h"
 
 #define AR9285_CLCAL_REDO_THRESH 1
+/* AGC & I/Q calibrations time limit, ms */
+#define AR9002_CAL_MAX_TIME 30000
 
 enum ar9002_cal_types {
     ADC_GAIN_CAL = BIT(0),
@@ -37,9 +39,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
         break;
     case ADC_GAIN_CAL:
     case ADC_DC_CAL:
-        /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
-        if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
-              IS_CHAN_HT20(chan)))
+        /* Run even/odd ADCs calibrations for HT40 channels only */
+        if (IS_CHAN_HT40(chan))
             supported = true;
         break;
     }
@@ -105,6 +106,14 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah,
         } else {
             ar9002_hw_setup_calibration(ah, currCal);
         }
+    } else if (time_after(jiffies, ah->cal_start_time +
+                  msecs_to_jiffies(AR9002_CAL_MAX_TIME))) {
+        REG_CLR_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+                AR_PHY_TIMING_CTRL4_DO_CAL);
+        ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+            "calibration timeout\n");
+        currCal->calState = CAL_WAITING;  /* Try later */
+        iscaldone = true;
     }
 } else if (!(caldata->CalValid & currCal->calData->calType)) {
     ath9k_hw_reset_calibration(ah, currCal);
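
AR9002_CAL_MAX_TIME caps a single AGC/I-Q calibration at 30 seconds: ah->cal_start_time is stamped when the calibration is kicked off (see the calib.c hunk further down) and the poll path above aborts the hardware calibration and re-queues it once the limit has passed. The jiffies arithmetic in isolation (illustrative, not driver code):

    #include <linux/jiffies.h>

    #define CAL_MAX_TIME_MS 30000 /* mirrors AR9002_CAL_MAX_TIME */

    /* True once more than CAL_MAX_TIME_MS has elapsed since cal_start_time;
     * time_after() handles jiffies wrap-around correctly.
     */
    static inline bool cal_timed_out(unsigned long cal_start_time)
    {
        return time_after(jiffies,
                  cal_start_time + msecs_to_jiffies(CAL_MAX_TIME_MS));
    }
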
@@ -664,8 +673,13 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
     int ret;
 
     nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
-    if (ah->caldata)
+    if (ah->caldata) {
         nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
+        if (longcal)        /* Remember to not miss */
+            set_bit(LONGCAL_PENDING, &ah->caldata->cal_flags);
+        else if (test_bit(LONGCAL_PENDING, &ah->caldata->cal_flags))
+            longcal = true; /* Respin a previous one */
+    }
 
     percal_pending = (currCal &&
               (currCal->calState == CAL_RUNNING ||
@@ -675,9 +689,24 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
         if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal))
             return 0;
 
-        ah->cal_list_curr = currCal = currCal->calNext;
-        if (currCal->calState == CAL_WAITING)
-            ath9k_hw_reset_calibration(ah, currCal);
+        /* Looking for next waiting calibration if any */
+        for (currCal = currCal->calNext; currCal != ah->cal_list_curr;
+             currCal = currCal->calNext) {
+            if (currCal->calState == CAL_WAITING)
+                break;
+        }
+        if (currCal->calState == CAL_WAITING) {
+            percal_pending = true;
+            ah->cal_list_curr = currCal;
+        } else {
+            percal_pending = false;
+            ah->cal_list_curr = ah->cal_list;
+        }
+    }
+
+    /* Do not start a next calibration if the longcal is in action */
+    if (percal_pending && !nfcal && !longcal) {
+        ath9k_hw_reset_calibration(ah, currCal);
 
         return 0;
     }
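
The calibration entries form a circular singly linked list, so "find the next waiting calibration" is a walk that starts one node past the current entry and stops on wrap-around; the ath9k_hw_reset_calvalid() rework further down traverses the same ring. A userspace model of the traversal:

    #include <stddef.h>

    struct cal {
        int waiting;      /* stand-in for calState == CAL_WAITING */
        struct cal *next; /* the last node links back to the first */
    };

    /* Return the next waiting entry after curr, curr itself if only it
     * is waiting, or NULL when nothing is pending.
     */
    static struct cal *next_waiting(struct cal *curr)
    {
        struct cal *c;

        for (c = curr->next; c != curr; c = c->next)
            if (c->waiting)
                return c;
        return curr->waiting ? curr : NULL;
    }
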
@@ -701,6 +730,9 @@
     }
 
     if (longcal) {
+        if (ah->caldata)
+            clear_bit(LONGCAL_PENDING,
+                  &ah->caldata->cal_flags);
         ath9k_hw_start_nfcal(ah, false);
         /* Do periodic PAOffset Cal */
         ar9002_hw_pa_cal(ah, false);
@@ -858,9 +890,6 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
     ath9k_hw_loadnf(ah, chan);
     ath9k_hw_start_nfcal(ah, true);
 
-    if (ah->caldata)
-        set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
-
     ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
     /* Enable IQ, ADC Gain and ADC DC offset CALs */
@@ -176,6 +176,7 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
     ath9k_hw_setup_calibration(ah, currCal);
 
+    ah->cal_start_time = jiffies;
     currCal->calState = CAL_RUNNING;
 
     for (i = 0; i < AR5416_MAX_CHAINS; i++) {
@@ -209,14 +210,17 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
         return true;
     }
 
-    if (!(ah->supp_cals & currCal->calData->calType))
-        return true;
+    currCal = ah->cal_list;
+    do {
+        ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
+            currCal->calData->calType,
+            ah->curchan->chan->center_freq);
 
-    ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
-        currCal->calData->calType, ah->curchan->chan->center_freq);
+        ah->caldata->CalValid &= ~currCal->calData->calType;
+        currCal->calState = CAL_WAITING;
 
-    ah->caldata->CalValid &= ~currCal->calData->calType;
-    currCal->calState = CAL_WAITING;
+        currCal = currCal->calNext;
+    } while (currCal != ah->cal_list);
 
     return false;
 }
@@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
      * which are not PHY_ERROR (short radar pulses have a length of 3)
      */
     if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
-        ath_warn(common,
-             "Short RX data len, dropping (dlen: %d)\n",
-             rs_datalen);
+        ath_dbg(common, ANY,
+            "Short RX data len, dropping (dlen: %d)\n",
+            rs_datalen);
         goto rx_next;
     }
@@ -427,6 +427,7 @@ enum ath9k_cal_flags {
     TXIQCAL_DONE,
     TXCLCAL_DONE,
     SW_PKDET_DONE,
+    LONGCAL_PENDING,
 };
 
 struct ath9k_hw_cal_data {
 
@@ -833,6 +834,7 @@ struct ath_hw {
     /* Calibration */
     u32 supp_cals;
+    unsigned long cal_start_time;
     struct ath9k_cal_list iq_caldata;
     struct ath9k_cal_list adcgain_caldata;
     struct ath9k_cal_list adcdc_caldata;
@@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
         ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
 
     if (SUPP(CARL9170FW_WLANTX_CAB)) {
-        if_comb_types |=
-            BIT(NL80211_IFTYPE_AP) |
-            BIT(NL80211_IFTYPE_P2P_GO);
+        if_comb_types |= BIT(NL80211_IFTYPE_AP);
 
 #ifdef CONFIG_MAC80211_MESH
         if_comb_types |=
@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar,
     ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
                 (vif->type != NL80211_IFTYPE_AP));
 
-    /* While the driver supports HW offload in a single
-     * P2P client configuration, it doesn't support HW
-     * offload in the favourit, concurrent P2P GO+CLIENT
-     * configuration. Hence, HW offload will always be
-     * disabled for P2P.
+    /* The driver used to have P2P GO+CLIENT support,
+     * but since this was dropped and we don't know if
+     * there are any gremlins lurking in the shadows,
+     * it is best to keep HW offload disabled for P2P.
      */
     ar->disable_offload |= vif->p2p;
@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
         if (vif->type == NL80211_IFTYPE_STATION)
             break;
 
-        /* P2P GO [master] use-case
-         * Because the P2P GO station is selected dynamically
-         * by all participating peers of a WIFI Direct network,
-         * the driver has be able to change the main interface
-         * operating mode on the fly.
-         */
-        if (main_vif->p2p && vif->p2p &&
-            vif->type == NL80211_IFTYPE_AP) {
-            old_main = main_vif;
-            break;
-        }
-
         err = -EBUSY;
         rcu_read_unlock();