Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.20. Major changes:

ath10k

* retrieve MAC address from system firmware if provided

* support extended board data download for dual-band QCA9984

* extended per sta tx statistics support via debugfs

* average ack rssi support for data frames

* speed up QCA6174 and QCA9377 firmware download using diag Copy Engine

* HTT High Latency mode support, needed for SDIO and USB support

* get STA power save state via debugfs

ath9k

* add reset functionality for airtime station debugfs file

Kalle Valo, 2018-10-04 08:33:42 +03:00
commit 09afaba1c3
45 changed files with 2427 additions and 808 deletions


@ -42,7 +42,8 @@ config ATH10K_USB
config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
depends on ATH10K && ARCH_QCOM
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
---help---
This module adds support for integrated WCN3990 chip connected
to system NOC(SNOC). Currently work in progress and will not


@ -750,7 +750,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
enum ath10k_hw_rev hw_rev;
size_t size;
int ret;
u32 chip_id;
struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
if (!of_id) {
@ -806,14 +806,15 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ath10k_pci_ce_deinit(ar);
chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
ret = -ENODEV;
goto err_halt_device;
}
ret = ath10k_core_register(ar, chip_id);
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_halt_device;


@ -459,3 +459,26 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
return ret;
}
int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
int ret;
if (ar->bmi.done_sent) {
ath10k_warn(ar, "bmi set start command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_SET_APP_START);
cmd.set_app_start.addr = __cpu_to_le32(address);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
return ret;
}
return 0;
}


@ -86,6 +86,10 @@ enum bmi_cmd_id {
#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
/* Dual-band Extended Board ID */
#define BMI_PARAM_GET_EXT_BOARD_ID 0x40000
#define ATH10K_BMI_EXT_BOARD_ID_SUPPORT 0x40000
#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
@ -93,6 +97,7 @@ enum bmi_cmd_id {
#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15
#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
#define ATH10K_BMI_EBOARD_ID_STATUS_MASK 0xff
struct bmi_cmd {
__le32 id; /* enum bmi_cmd_id */
@ -190,6 +195,35 @@ struct bmi_target_info {
u32 type;
};
struct bmi_segmented_file_header {
__le32 magic_num;
__le32 file_flags;
u8 data[];
};
struct bmi_segmented_metadata {
__le32 addr;
__le32 length;
u8 data[];
};
#define BMI_SGMTFILE_MAGIC_NUM 0x544d4753 /* "SGMT" */
#define BMI_SGMTFILE_FLAG_COMPRESS 1
/* Special values for bmi_segmented_metadata.length (all have high bit set) */
/* end of segmented data */
#define BMI_SGMTFILE_DONE 0xffffffff
/* Board Data segment */
#define BMI_SGMTFILE_BDDATA 0xfffffffe
/* set beginning address */
#define BMI_SGMTFILE_BEGINADDR 0xfffffffd
/* immediate function execution */
#define BMI_SGMTFILE_EXEC 0xfffffffc
/* in jiffies */
#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
@ -239,4 +273,6 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
int ath10k_bmi_set_start(struct ath10k *ar, u32 address);
#endif /* _BMI_H_ */
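
For orientation, here is a minimal host-side sketch (not part of the commit) of how an image in the segmented format declared above can be walked. It uses only the structures and magic values from this header; handle_segment() is a hypothetical callback standing in for the actual diag write, and a real downloader would call ath10k_bmi_set_start() for the BEGINADDR record.

/* Hedged sketch: iterate a BMI segmented image already in host memory.
 * Layout per bmi_segmented_file_header/bmi_segmented_metadata above;
 * handle_segment() is a hypothetical callback.
 */
static int walk_sgmt_image(const u8 *buf, size_t len,
                           int (*handle_segment)(u32 addr, const u8 *data,
                                                 u32 len))
{
        const struct bmi_segmented_file_header *hdr = (const void *)buf;
        const struct bmi_segmented_metadata *md;
        size_t left;

        if (len < sizeof(*hdr) ||
            __le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM ||
            hdr->file_flags != 0)       /* compressed images not handled */
                return -EINVAL;

        md = (const struct bmi_segmented_metadata *)hdr->data;
        left = len - sizeof(*hdr);

        while (left >= sizeof(*md)) {
                u32 addr = __le32_to_cpu(md->addr);
                u32 seg_len = __le32_to_cpu(md->length);

                if (seg_len == BMI_SGMTFILE_DONE)
                        return 0;               /* end of image */

                if (seg_len == BMI_SGMTFILE_BEGINADDR) {
                        seg_len = 0;            /* addr is the entry point */
                } else if (seg_len & 0x80000000) {
                        return -EINVAL;         /* BDDATA/EXEC not handled */
                } else {
                        if (seg_len > left - sizeof(*md))
                                return -EINVAL; /* truncated segment */
                        if (handle_segment(addr, md->data, seg_len))
                                return -EIO;
                }

                left -= sizeof(*md) + seg_len;
                md = (const struct bmi_segmented_metadata *)(md->data + seg_len);
        }

        return -EINVAL;                         /* missing DONE record */
}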


@ -1280,10 +1280,17 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr;
int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ce_state = &ce->ce_states[ce_id];
if (ce_state->attr_flags & CE_ATTR_POLL)
continue;
ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_ce_error_intr_disable(ar, ctrl_addr);
@ -1300,11 +1307,14 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
int ce_id;
struct ath10k_ce_pipe *ce_state;
/* Skip the last copy engine, CE7 the diagnostic window, as that
* uses polling and isn't initialized for interrupts.
/* Enable interrupts for copy engine that
* are not using polling mode.
*/
for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
ce_state = &ce->ce_states[ce_id];
if (ce_state->attr_flags & CE_ATTR_POLL)
continue;
ath10k_ce_per_engine_handler_adjust(ce_state);
}
}


@ -275,16 +275,19 @@ void ath10k_ce_free_rri(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP 1
#define CE_ATTR_NO_SNOOP BIT(0)
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
#define CE_ATTR_BYTE_SWAP_DATA BIT(1)
/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
#define CE_ATTR_SWIZZLE_DESCRIPTORS BIT(2)
/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR 8
#define CE_ATTR_DIS_INTR BIT(3)
/* no interrupt, only polling */
#define CE_ATTR_POLL BIT(4)
/* Attributes of an instance of a Copy Engine */
struct ce_attr {

File diff suppressed because it is too large.


@ -92,14 +92,6 @@
struct ath10k;
enum ath10k_bus {
ATH10K_BUS_PCI,
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
ATH10K_BUS_SNOC,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
{
switch (bus) {
@ -461,6 +453,36 @@ struct ath10k_sta_tid_stats {
unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
};
enum ath10k_counter_type {
ATH10K_COUNTER_TYPE_BYTES,
ATH10K_COUNTER_TYPE_PKTS,
ATH10K_COUNTER_TYPE_MAX,
};
enum ath10k_stats_type {
ATH10K_STATS_TYPE_SUCC,
ATH10K_STATS_TYPE_FAIL,
ATH10K_STATS_TYPE_RETRY,
ATH10K_STATS_TYPE_AMPDU,
ATH10K_STATS_TYPE_MAX,
};
struct ath10k_htt_data_stats {
u64 legacy[ATH10K_COUNTER_TYPE_MAX][ATH10K_LEGACY_NUM];
u64 ht[ATH10K_COUNTER_TYPE_MAX][ATH10K_HT_MCS_NUM];
u64 vht[ATH10K_COUNTER_TYPE_MAX][ATH10K_VHT_MCS_NUM];
u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM];
u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM];
u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM];
};
struct ath10k_htt_tx_stats {
struct ath10k_htt_data_stats stats[ATH10K_STATS_TYPE_MAX];
u64 tx_duration;
u64 ba_fails;
u64 ack_fails;
};
struct ath10k_sta {
struct ath10k_vif *arvif;
@ -474,6 +496,7 @@ struct ath10k_sta {
struct work_struct update_wk;
u64 rx_duration;
struct ath10k_htt_tx_stats *tx_stats;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
@ -482,6 +505,8 @@ struct ath10k_sta {
/* Protected with ar->data_lock */
struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1];
#endif
/* Protected with ar->data_lock */
u32 peer_ps_state;
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
@ -607,6 +632,7 @@ struct ath10k_debug {
u32 reg_addr;
u32 nf_cal_period;
void *cal_data;
u32 enable_extd_tx_stats;
};
enum ath10k_state {
@ -861,6 +887,9 @@ struct ath10k_fw_components {
const struct firmware *board;
const void *board_data;
size_t board_len;
const struct firmware *ext_board;
const void *ext_board_data;
size_t ext_board_len;
struct ath10k_fw_file fw_file;
};
@ -880,6 +909,16 @@ struct ath10k_per_peer_tx_stats {
u32 reserved2;
};
enum ath10k_dev_type {
ATH10K_DEV_TYPE_LL,
ATH10K_DEV_TYPE_HL,
};
struct ath10k_bus_params {
u32 chip_id;
enum ath10k_dev_type dev_type;
};
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@ -890,6 +929,7 @@ struct ath10k {
enum ath10k_hw_rev hw_rev;
u16 dev_id;
u32 chip_id;
enum ath10k_dev_type dev_type;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@ -908,6 +948,8 @@ struct ath10k {
u32 low_5ghz_chan;
u32 high_5ghz_chan;
bool ani_enabled;
/* protected by conf_mutex */
u8 ps_state_enable;
bool p2p;
@ -947,7 +989,9 @@ struct ath10k {
bool bmi_ids_valid;
u8 bmi_board_id;
u8 bmi_eboard_id;
u8 bmi_chip_id;
bool ext_bid_supported;
char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
} id;
@ -1003,6 +1047,7 @@ struct ath10k {
struct completion install_key_done;
int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
@ -1167,7 +1212,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw_components);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */


@ -2042,6 +2042,61 @@ static const struct file_operations fops_btcoex = {
.open = simple_open
};
static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
u32 filter;
int ret;
if (kstrtouint_from_user(ubuf, count, 0, &filter))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ar->debug.enable_extd_tx_stats = filter;
ret = count;
goto out;
}
if (filter == ar->debug.enable_extd_tx_stats) {
ret = count;
goto out;
}
ar->debug.enable_extd_tx_stats = filter;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file,
char __user *ubuf,
size_t count, loff_t *ppos)
{
char buf[32];
struct ath10k *ar = file->private_data;
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
ar->debug.enable_extd_tx_stats);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_enable_extd_tx_stats = {
.read = ath10k_read_enable_extd_tx_stats,
.write = ath10k_write_enable_extd_tx_stats,
.open = simple_open
};
static ssize_t ath10k_write_peer_stats(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
@ -2343,6 +2398,85 @@ static const struct file_operations fops_warm_hw_reset = {
.llseek = default_llseek,
};
static void ath10k_peer_ps_state_disable(void *data,
struct ieee80211_sta *sta)
{
struct ath10k *ar = data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
spin_lock_bh(&ar->data_lock);
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
spin_unlock_bh(&ar->data_lock);
}
static ssize_t ath10k_write_ps_state_enable(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int ret;
u32 param;
u8 ps_state_enable;
if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
return -EINVAL;
if (ps_state_enable > 1 || ps_state_enable < 0)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->ps_state_enable == ps_state_enable) {
ret = count;
goto exit;
}
param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable;
ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable);
if (ret) {
ath10k_warn(ar, "failed to enable ps_state_enable: %d\n",
ret);
goto exit;
}
ar->ps_state_enable = ps_state_enable;
if (!ar->ps_state_enable)
ieee80211_iterate_stations_atomic(ar->hw,
ath10k_peer_ps_state_disable,
ar);
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_read_ps_state_enable(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int len = 0;
char buf[32];
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
ar->ps_state_enable);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_ps_state_enable = {
.read = ath10k_read_ps_state_enable,
.write = ath10k_write_ps_state_enable,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@ -2454,10 +2588,15 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
&fops_btcoex);
if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
&fops_peer_stats);
debugfs_create_file("enable_extd_tx_stats", 0644,
ar->debug.debugfs_phy, ar,
&fops_enable_extd_tx_stats);
}
debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_checksums);
@ -2474,6 +2613,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
&fops_warm_hw_reset);
debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ps_state_enable);
return 0;
}
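
For completeness, a small userspace sketch (not from the commit) showing how the two new knobs might be exercised; the debugfs mount point and phy name in the path are assumptions and will differ per system.

/* Hedged example: turn on extended per-STA TX stats and peer PS state
 * reporting through the new debugfs files.  The path assumes debugfs is
 * mounted at /sys/kernel/debug and the device is phy0.
 */
#include <stdio.h>

static int write_flag(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        const char *base = "/sys/kernel/debug/ieee80211/phy0/ath10k";
        char path[256];

        snprintf(path, sizeof(path), "%s/enable_extd_tx_stats", base);
        write_flag(path, "1");

        snprintf(path, sizeof(path), "%s/ps_state_enable", base);
        write_flag(path, "1");

        return 0;
}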


@ -128,6 +128,10 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return ar->debug.fw_dbglog_level;
}
static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
{
return ar->debug.enable_extd_tx_stats;
}
#else
static inline int ath10k_debug_start(struct ath10k *ar)
@ -190,6 +194,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return 0;
}
static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
{
return 0;
}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL


@ -460,6 +460,33 @@ static const struct file_operations fops_peer_debug_trigger = {
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[20];
int len = 0;
spin_lock_bh(&ar->data_lock);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
arsta->peer_ps_state);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_peer_ps_state = {
.open = simple_open,
.read = ath10k_dbg_sta_read_peer_ps_state,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static char *get_err_str(enum ath10k_pkt_rx_err i)
{
switch (i) {
@ -626,9 +653,105 @@ static const struct file_operations fops_tid_stats_dump = {
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
struct ath10k_htt_data_stats *stats;
const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail",
"retry", "ampdu"};
const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
int len = 0, i, j, k, retval = 0;
const int size = 2 * 4096;
char *buf;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
stats = &arsta->tx_stats->stats[k];
len += scnprintf(buf + len, size - len, "%s_%s\n",
str_name[k],
str[j]);
len += scnprintf(buf + len, size - len,
" VHT MCS %s\n",
str[j]);
for (i = 0; i < ATH10K_VHT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ",
stats->vht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, " HT MCS %s\n",
str[j]);
for (i = 0; i < ATH10K_HT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ", stats->ht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" BW %s (20,40,80,160 MHz)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->bw[j][0], stats->bw[j][1],
stats->bw[j][2], stats->bw[j][3]);
len += scnprintf(buf + len, size - len,
" NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->nss[j][0], stats->nss[j][1],
stats->nss[j][2], stats->nss[j][3]);
len += scnprintf(buf + len, size - len,
" GI %s (LGI,SGI)\n",
str[j]);
len += scnprintf(buf + len, size - len, " %llu %llu\n",
stats->gi[j][0], stats->gi[j][1]);
len += scnprintf(buf + len, size - len,
" legacy rate %s (1,2 ... Mbps)\n ",
str[j]);
for (i = 0; i < ATH10K_LEGACY_NUM; i++)
len += scnprintf(buf + len, size - len, "%llu ",
stats->legacy[j][i]);
len += scnprintf(buf + len, size - len, "\n");
}
}
len += scnprintf(buf + len, size - len,
"\nTX duration\n %llu usecs\n",
arsta->tx_stats->tx_duration);
len += scnprintf(buf + len, size - len,
"BA fails\n %llu\n", arsta->tx_stats->ba_fails);
len += scnprintf(buf + len, size - len,
"ack fails\n %llu\n", arsta->tx_stats->ack_fails);
spin_unlock_bh(&ar->data_lock);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return retval;
}
static const struct file_operations fops_tx_stats = {
.read = ath10k_dbg_sta_dump_tx_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
struct ath10k *ar = hw->priv;
debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
@ -637,4 +760,11 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
&fops_peer_debug_trigger);
debugfs_create_file("dump_tid_stats", 0400, dir, sta,
&fops_tid_stats_dump);
if (ath10k_peer_stats_enabled(ar) &&
ath10k_debug_is_extd_tx_stats_enabled(ar))
debugfs_create_file("tx_stats", 0400, dir, sta,
&fops_tx_stats);
debugfs_create_file("peer_ps_state", 0400, dir, sta,
&fops_peer_ps_state);
}
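
A matching read-side sketch (again not part of the commit) for the per-station files added above; the station directory path is an assumption, and tx_stats is only created when both peer stats and extended TX stats are enabled.

/* Hedged example: dump a station's peer_ps_state and tx_stats debugfs
 * files.  Interface name and MAC address below are placeholders.
 */
#include <stdio.h>

static void dump_file(const char *path)
{
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
}

int main(void)
{
        const char *sta = "/sys/kernel/debug/ieee80211/phy0/"
                          "netdev:wlan0/stations/aa:bb:cc:dd:ee:ff";
        char path[512];

        snprintf(path, sizeof(path), "%s/peer_ps_state", sta);
        dump_file(path);        /* firmware-reported power save state */

        snprintf(path, sizeof(path), "%s/tx_stats", sta);
        dump_file(path);        /* succ/fail/retry/ampdu rate tables */

        return 0;
}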


@ -53,6 +53,7 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
@ -137,12 +138,15 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
skb_cb->eid = eid;
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
if (ar->dev_type != ATH10K_DEV_TYPE_HL) {
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret) {
ret = -EIO;
goto err_credits;
}
}
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = skb;
@ -157,6 +161,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return 0;
err_unmap:
if (ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
@ -803,8 +808,11 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status)
if (status) {
ath10k_warn(ar, "unsupported HTC service id: %d\n",
ep->service_id);
return status;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
@ -838,6 +846,56 @@ struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
return skb;
}
static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
static int ath10k_htc_pktlog_connect(struct ath10k *ar)
{
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_svc_conn_req conn_req;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = NULL;
conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
conn_req.ep_ops.ep_tx_credits = NULL;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
if (status) {
ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
status);
return status;
}
return 0;
}
static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
{
u8 ul_pipe_id;
u8 dl_pipe_id;
int status;
status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
&ul_pipe_id,
&dl_pipe_id);
if (status) {
ath10k_warn(ar, "unsupported HTC service id: %d\n",
ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
return false;
}
return true;
}
int ath10k_htc_start(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
@ -871,6 +929,14 @@ int ath10k_htc_start(struct ath10k_htc *htc)
return status;
}
if (ath10k_htc_pktlog_svc_supported(ar)) {
status = ath10k_htc_pktlog_connect(ar);
if (status) {
ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
return status;
}
}
return 0;
}


@ -29,7 +29,6 @@
#include "htc.h"
#include "hw.h"
#include "rx_desc.h"
#include "hw.h"
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@ -577,6 +576,8 @@ struct htt_mgmt_tx_completion {
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
struct htt_rx_indication_hdr {
u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
__le16 peer_id;
@ -719,6 +720,15 @@ struct htt_rx_indication {
struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
/* High latency version of the RX indication */
struct htt_rx_indication_hl {
struct htt_rx_indication_hdr hdr;
struct htt_rx_indication_ppdu ppdu;
struct htt_rx_indication_prefix prefix;
struct fw_rx_desc_hl fw_desc;
struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
@ -731,6 +741,18 @@ static inline struct htt_rx_indication_mpdu_range *
return ptr;
}
static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
{
void *ptr = rx_ind;
ptr += sizeof(rx_ind->hdr)
+ sizeof(rx_ind->ppdu)
+ sizeof(rx_ind->prefix)
+ sizeof(rx_ind->fw_desc);
return ptr;
}
enum htt_rx_flush_mpdu_status {
HTT_RX_FLUSH_MPDU_DISCARD = 0,
HTT_RX_FLUSH_MPDU_REORDER = 1,
@ -840,7 +862,7 @@ struct htt_data_tx_completion {
} __packed;
} __packed;
u8 num_msdus;
u8 rsvd0;
u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
__le16 msdus[0]; /* variable length based on %num_msdus */
} __packed;
@ -1641,6 +1663,7 @@ struct htt_resp {
struct htt_mgmt_tx_completion mgmt_tx_completion;
struct htt_data_tx_completion data_tx_completion;
struct htt_rx_indication rx_ind;
struct htt_rx_indication_hl rx_ind_hl;
struct htt_rx_fragment_indication rx_frag_ind;
struct htt_rx_peer_map peer_map;
struct htt_rx_peer_unmap peer_unmap;
@ -1994,6 +2017,31 @@ struct htt_rx_desc {
u8 msdu_payload[0];
};
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00008000
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 15
#define HTT_RX_DESC_HL_INFO_FRAGMENT_MASK 0x00010000
#define HTT_RX_DESC_HL_INFO_FRAGMENT_LSB 16
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17
struct htt_rx_desc_base_hl {
__le32 info; /* HTT_RX_DESC_HL_INFO_ */
};
struct htt_rx_chan_info {
__le16 primary_chan_center_freq_mhz;
__le16 contig_chan1_center_freq_mhz;
__le16 contig_chan2_center_freq_mhz;
u8 phy_mode;
u8 reserved;
} __packed;
#define HTT_RX_DESC_ALIGN 8
#define HTT_MAC_ADDR_LEN 6
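
The new HTT_RX_DESC_HL_INFO_* pairs follow the driver's usual MASK/LSB convention and are meant for the existing MS() mask-and-shift helper; a minimal sketch using only the defines above:

/* Hedged sketch: extract fields from the high-latency RX descriptor
 * info word.  MS() is the driver's existing mask/shift helper
 * ((val & FIELD_MASK) >> FIELD_LSB).
 */
static void example_parse_hl_rx_desc(const struct htt_rx_desc_base_hl *desc)
{
        u32 info = __le32_to_cpu(desc->info);
        u16 seq_num = MS(info, HTT_RX_DESC_HL_INFO_SEQ_NUM);
        bool encrypted = MS(info, HTT_RX_DESC_HL_INFO_ENCRYPTED);
        bool fragment = MS(info, HTT_RX_DESC_HL_INFO_FRAGMENT);
        u8 key_id_oct = MS(info, HTT_RX_DESC_HL_INFO_KEY_ID_OCT);

        (void)seq_num;
        (void)encrypted;
        (void)fragment;
        (void)key_id_oct;
}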


@ -265,6 +265,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
struct ath10k_htt *htt = &ar->htt;
int ret;
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return 0;
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
@ -279,6 +282,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
return;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
skb_queue_purge(&htt->rx_msdus_q);
@ -570,6 +576,9 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return 0;
htt->rx_confused = false;
/* XXX: The fill level could be changed during runtime in response to
@ -1846,7 +1855,115 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return 0;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
struct htt_rx_indication_hl *rx,
struct sk_buff *skb)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct fw_rx_desc_hl *fw_desc;
struct ieee80211_hdr *hdr;
struct ieee80211_rx_status *rx_status;
u16 peer_id;
u8 rx_desc_len;
int num_mpdu_ranges;
size_t tot_hdr_len;
struct ieee80211_channel *ch;
peer_id = __le16_to_cpu(rx->hdr.peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
spin_unlock_bh(&ar->data_lock);
if (!peer)
ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem to handle that case either, so we introduce the
* same limitation here as well.
*/
if (num_mpdu_ranges > 1)
ath10k_warn(ar,
"Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
num_mpdu_ranges);
if (mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_OK) {
ath10k_warn(ar, "MPDU range status: %d\n",
mpdu_ranges->mpdu_range_status);
goto err;
}
/* Strip off all headers before the MAC header before delivery to
* mac80211
*/
tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
sizeof(rx->ppdu) + sizeof(rx->prefix) +
sizeof(rx->fw_desc) +
sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
skb_pull(skb, tot_hdr_len);
hdr = (struct ieee80211_hdr *)skb->data;
rx_status = IEEE80211_SKB_RXCB(skb);
rx_status->chains |= BIT(0);
rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rx->ppdu.combined_rssi;
rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
if (!ch)
ch = ar->tgt_oper_chan;
spin_unlock_bh(&ar->data_lock);
if (ch) {
rx_status->band = ch->band;
rx_status->freq = ch->center_freq;
}
if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
else
rx_status->flag |= RX_FLAG_AMSDU_MORE;
/* Not entirely sure about this, but all frames from the chipset has
* the protected flag set even though they have already been decrypted.
* Unmasking this flag is necessary in order for mac80211 not to drop
* the frame.
* TODO: Verify this is always the case or find out a way to check
* if there has been hw decryption.
*/
if (ieee80211_has_protected(hdr->frame_control)) {
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
rx_status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
}
ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
* must not free it.
*/
return false;
err:
/* Tell the caller that it must free the skb since we have not
* consumed it
*/
return true;
}
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
struct ath10k *ar = htt->ar;
@ -1884,7 +2001,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct htt_tx_done tx_done = {};
int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
__le16 msdu_id;
__le16 msdu_id, *msdus;
bool rssi_enabled = false;
u8 msdu_count = 0;
int i;
switch (status) {
@ -1908,10 +2027,30 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
msdu_count = resp->data_tx_completion.num_msdus;
if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
rssi_enabled = true;
for (i = 0; i < msdu_count; i++) {
msdus = resp->data_tx_completion.msdus;
msdu_id = msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
if (rssi_enabled) {
/* Total no of MSDUs should be even,
* if odd MSDUs are sent firmware fills
* last msdu id with 0xffff
*/
if (msdu_count & 0x01) {
msdu_id = msdus[msdu_count + i + 1];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
} else {
msdu_id = msdus[msdu_count + i];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
}
}
/* kfifo_put: In practice firmware shouldn't fire off per-CE
* interrupt and main interrupt (MSI/-X range case) for the same
* HTC service so it should be safe to use kfifo_put w/o lock.
@ -2488,7 +2627,7 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
static inline bool is_valid_legacy_rate(u8 rate)
static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
18, 24, 36, 48, 54};
@ -2496,10 +2635,116 @@ static inline bool is_valid_legacy_rate(u8 rate)
for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
if (rate == legacy_rates[i])
return true;
return i;
}
return false;
ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
return -EINVAL;
}
static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_sta *arsta,
struct ath10k_per_peer_tx_stats *pstats,
u8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath10k_htt_tx_stats *tx_stats;
int ht_idx, gi, mcs, bw, nss;
if (!arsta->tx_stats)
return;
tx_stats = arsta->tx_stats;
gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
ht_idx = txrate->mcs + txrate->nss * 8;
mcs = txrate->mcs;
bw = txrate->bw;
nss = txrate->nss;
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
} else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
} else {
mcs = legacy_rate_idx;
if (mcs < 0)
return;
STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
}
if (ATH10K_HW_AMPDU(pstats->flags)) {
tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
if (txrate->flags == RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
pstats->succ_pkts + pstats->retry_pkts;
} else {
STATS_OP_FMT(AMPDU).vht[0][mcs] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).vht[1][mcs] +=
pstats->succ_pkts + pstats->retry_pkts;
}
STATS_OP_FMT(AMPDU).bw[0][bw] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).nss[0][nss] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
pstats->succ_pkts + pstats->retry_pkts;
} else {
tx_stats->ack_fails +=
ATH10K_HW_BA_FAIL(pstats->flags);
}
STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
}
static void
@ -2508,7 +2753,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
u8 rate = 0, sgi;
u8 rate = 0, rate_idx = 0, sgi;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
@ -2536,17 +2781,12 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
if (!is_valid_legacy_rate(rate)) {
ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
rate);
return;
}
/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
rate *= 10;
if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
rate = rate - 5;
if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
rate = 5;
rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
if (rate_idx < 0)
return;
arsta->txrate.legacy = rate;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
@ -2561,6 +2801,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->txrate.nss = txrate.nss;
arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
}
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
@ -2702,7 +2946,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return ath10k_htt_rx_proc_rx_ind_hl(htt,
&resp->rx_ind_hl,
skb);
else
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
@ -2986,11 +3235,16 @@ static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
if (ar->hw_params.target_64bit)
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
htt->rx_ops = &htt_rx_ops_hl;
else if (ar->hw_params.target_64bit)
htt->rx_ops = &htt_rx_ops_64;
else
htt->rx_ops = &htt_rx_ops_32;
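
The even/odd branches in ath10k_htt_rx_tx_compl_ind() above imply the following msdus[] layout when HTT_TX_CMPL_FLAG_DATA_RSSI is set; the layout is inferred from that code, not from a firmware interface document.

/* For num_msdus = 3 (odd) the completion payload reads as:
 *
 *   index:  0      1      2      3       4        5        6
 *   value:  id[0]  id[1]  id[2]  0xffff  rssi[0]  rssi[1]  rssi[2]
 *
 * i.e. the MSDU ID block is padded with 0xffff up to an even count and
 * the per-MSDU ack RSSI words follow.  A compact way to express the
 * same indexing as the loop above:
 */
static u16 example_ack_rssi(const struct htt_data_tx_completion *c, int i)
{
        int ids = c->num_msdus + (c->num_msdus & 1);    /* pad IDs to even */

        return __le16_to_cpu(c->msdus[ids + i]);
}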


@ -495,6 +495,9 @@ int ath10k_htt_tx_start(struct ath10k_htt *htt)
if (htt->tx_mem_allocated)
return 0;
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return 0;
ret = ath10k_htt_tx_alloc_buf(htt);
if (ret)
goto free_idr_pending_tx;
@ -934,6 +937,57 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
return 0;
}
static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring32 *ring;
const int num_rx_ring = 1;
u16 flags;
int len;
int ret;
/*
* the HW expects the buffer to be an integral number of 4-byte
* "words"
*/
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
ring = &cmd->rx_setup_32.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
cmd->rx_setup_32.hdr.num_rings = 1;
flags = 0;
flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
memset(ring, 0, sizeof(*ring));
ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring->flags = __cpu_to_le16(flags);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu)
@ -1123,6 +1177,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return 0;
err_unmap_msdu:
if (ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
@ -1134,6 +1189,94 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return res;
}
#define HTT_TX_HL_NEEDED_HEADROOM \
(unsigned int)(sizeof(struct htt_cmd_hdr) + \
sizeof(struct htt_data_tx_desc) + \
sizeof(struct ath10k_htc_hdr))
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
int res, data_len;
struct htt_cmd_hdr *cmd_hdr;
struct htt_data_tx_desc *tx_desc;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *tmp_skb;
bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
u8 flags0 = 0;
u16 flags1 = 0;
data_len = msdu->len;
switch (txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* fall through */
case ATH10K_HW_TXRX_ETHERNET:
flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
break;
}
if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL &&
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
}
/* Prepend the HTT header and TX desc struct to the data message
* and realloc the skb if it does not have enough headroom.
*/
if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
tmp_skb = msdu;
ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
"Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
kfree_skb(tmp_skb);
if (!msdu) {
ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
res = -ENOMEM;
goto out;
}
}
skb_push(msdu, sizeof(*cmd_hdr));
skb_push(msdu, sizeof(*tx_desc));
cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));
cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
tx_desc->flags0 = flags0;
tx_desc->flags1 = __cpu_to_le16(flags1);
tx_desc->len = __cpu_to_le16(data_len);
tx_desc->id = 0;
tx_desc->frags_paddr = 0; /* always zero */
/* Initialize peer_id to INVALID_PEER because this is NOT
* Reinjection path
*/
tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
out:
return res;
}
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
@ -1561,11 +1704,19 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
};
static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
.htt_tx = ath10k_htt_tx_hl,
};
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
if (ar->hw_params.target_64bit)
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
htt->tx_ops = &htt_tx_ops_hl;
else if (ar->hw_params.target_64bit)
htt->tx_ops = &htt_tx_ops_64;
else
htt->tx_ops = &htt_tx_ops_32;
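
For reference, the buffer that ath10k_htt_tx_hl() hands to HTC ends up laid out as sketched below (inferred from the skb_push() calls and HTT_TX_HL_NEEDED_HEADROOM above; the HTC header itself is filled in later by ath10k_htc_send()).

/* High-latency TX buffer after ath10k_htt_tx_hl():
 *
 *   [ ath10k_htc_hdr    ]  reserved headroom, written by HTC on send
 *   [ htt_cmd_hdr       ]  msg_type = HTT_H2T_MSG_TYPE_TX_FRM
 *   [ htt_data_tx_desc  ]  flags0, flags1, len, id, frags_paddr = 0, peerid
 *   [ frame payload     ]  data_len bytes, 802.11 or Ethernet framing
 *
 * which is exactly the sum that HTT_TX_HL_NEEDED_HEADROOM reserves.
 */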


@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
@ -918,6 +919,196 @@ static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
return 0;
}
/* Program CPU_ADDR_MSB to allow different memory
* region access.
*/
static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
{
u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;
ath10k_hif_write32(ar, address, msb);
}
/* 1. Write to memory region of target, such as IRAM and DRAM.
* 2. Target address( 0 ~ 00100000 & 0x00400000~0x00500000)
* can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
* 3. In order to access the region other than the above,
* we need to set the value of register CPU_ADDR_MSB.
* 4. Target memory access space is limited to 1M size. If the size is larger
* than 1M, need to split it and program CPU_ADDR_MSB accordingly.
*/
static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
const void *buffer,
u32 address,
u32 length)
{
u32 addr = address & REGION_ACCESS_SIZE_MASK;
int ret, remain_size, size;
const u8 *buf;
ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));
if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
size = REGION_ACCESS_SIZE_LIMIT - addr;
remain_size = length - size;
ret = ath10k_hif_diag_write(ar, address, buffer, size);
if (ret) {
ath10k_warn(ar,
"failed to download the first %d bytes segment to address:0x%x: %d\n",
size, address, ret);
goto done;
}
/* Change msb to the next memory region*/
ath10k_hw_map_target_mem(ar,
CPU_ADDR_MSB_REGION_VAL(address) + 1);
buf = buffer + size;
ret = ath10k_hif_diag_write(ar,
address & ~REGION_ACCESS_SIZE_MASK,
buf, remain_size);
if (ret) {
ath10k_warn(ar,
"failed to download the second %d bytes segment to address:0x%x: %d\n",
remain_size,
address & ~REGION_ACCESS_SIZE_MASK,
ret);
goto done;
}
} else {
ret = ath10k_hif_diag_write(ar, address, buffer, length);
if (ret) {
ath10k_warn(ar,
"failed to download the only %d bytes segment to address:0x%x: %d\n",
length, address, ret);
goto done;
}
}
done:
/* Change msb to DRAM */
ath10k_hw_map_target_mem(ar,
CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
return ret;
}
static int ath10k_hw_diag_segment_download(struct ath10k *ar,
const void *buffer,
u32 address,
u32 length)
{
if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
/* Needs to change MSB for memory write */
return ath10k_hw_diag_segment_msb_download(ar, buffer,
address, length);
else
return ath10k_hif_diag_write(ar, address, buffer, length);
}
int ath10k_hw_diag_fast_download(struct ath10k *ar,
u32 address,
const void *buffer,
u32 length)
{
const u8 *buf = buffer;
bool sgmt_end = false;
u32 base_addr = 0;
u32 base_len = 0;
u32 left = 0;
struct bmi_segmented_file_header *hdr;
struct bmi_segmented_metadata *metadata;
int ret = 0;
if (length < sizeof(*hdr))
return -EINVAL;
/* check firmware header. If it has no correct magic number
* or it's compressed, returns error.
*/
hdr = (struct bmi_segmented_file_header *)buf;
if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Not a supported firmware, magic_num:0x%x\n",
hdr->magic_num);
return -EINVAL;
}
if (hdr->file_flags != 0) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Not a supported firmware, file_flags:0x%x\n",
hdr->file_flags);
return -EINVAL;
}
metadata = (struct bmi_segmented_metadata *)hdr->data;
left = length - sizeof(*hdr);
while (left > 0) {
if (left < sizeof(*metadata)) {
ath10k_warn(ar, "firmware segment is truncated: %d\n",
left);
ret = -EINVAL;
break;
}
base_addr = __le32_to_cpu(metadata->addr);
base_len = __le32_to_cpu(metadata->length);
buf = metadata->data;
left -= sizeof(*metadata);
switch (base_len) {
case BMI_SGMTFILE_BEGINADDR:
/* base_addr is the start address to run */
ret = ath10k_bmi_set_start(ar, base_addr);
base_len = 0;
break;
case BMI_SGMTFILE_DONE:
/* no more segment */
base_len = 0;
sgmt_end = true;
ret = 0;
break;
case BMI_SGMTFILE_BDDATA:
case BMI_SGMTFILE_EXEC:
ath10k_warn(ar,
"firmware has unsupported segment:%d\n",
base_len);
ret = -EINVAL;
break;
default:
if (base_len > left) {
/* sanity check */
ath10k_warn(ar,
"firmware has invalid segment length, %d > %d\n",
base_len, left);
ret = -EINVAL;
break;
}
ret = ath10k_hw_diag_segment_download(ar,
buf,
base_addr,
base_len);
if (ret)
ath10k_warn(ar,
"failed to download firmware via diag interface:%d\n",
ret);
break;
}
if (ret || sgmt_end)
break;
metadata = (struct bmi_segmented_metadata *)(buf + base_len);
left -= base_len;
}
if (ret == 0)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot firmware fast diag download successfully.\n");
return ret;
}
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};


@ -21,6 +21,14 @@
#include "targaddrs.h"
enum ath10k_bus {
ATH10K_BUS_PCI,
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
ATH10K_BUS_SNOC,
};
#define ATH10K_FW_DIR "ath10k"
#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac)
@ -109,6 +117,7 @@ enum qca9377_chip_id_rev {
#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin"
#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA9888 2.0 defines */
@ -221,6 +230,7 @@ enum ath10k_fw_htt_op_version {
enum ath10k_bd_ie_type {
/* contains sub IEs of enum ath10k_bd_ie_board_type */
ATH10K_BD_IE_BOARD = 0,
ATH10K_BD_IE_BOARD_EXT = 1,
};
enum ath10k_bd_ie_board_type {
@ -389,6 +399,11 @@ extern const struct ath10k_hw_ce_regs qcax_ce_regs;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
int ath10k_hw_diag_fast_download(struct ath10k *ar,
u32 address,
const void *buffer,
u32 length);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
@ -501,6 +516,7 @@ struct ath10k_hw_clk_params {
struct ath10k_hw_params {
u32 id;
u16 dev_id;
enum ath10k_bus bus;
const char *name;
u32 patch_load_addr;
int uart_pin;
@ -539,6 +555,8 @@ struct ath10k_hw_params {
const char *dir;
const char *board;
size_t board_size;
const char *eboard;
size_t ext_board_size;
size_t board_ext_size;
} fw;
@ -594,6 +612,9 @@ struct ath10k_hw_params {
* to avoid it sending spurious acks.
*/
bool hw_filter_reset_required;
/* target supporting fw download via diag ce */
bool fw_diag_ce_download;
};
struct htt_rx_desc;
@ -1129,4 +1150,15 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
/* qca6174 PLL offset/mask end */
/* CPU_ADDR_MSB is a register, bit[3:0] is to specify which memory
* region is accessed. The memory region size is 1M.
* If host wants to access 0xX12345 at target, then CPU_ADDR_MSB[3:0]
* is 0xX.
* The following MACROs are defined to get the 0xX and the size limit.
*/
#define CPU_ADDR_MSB_REGION_MASK GENMASK(23, 20)
#define CPU_ADDR_MSB_REGION_VAL(X) FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X)
#define REGION_ACCESS_SIZE_LIMIT 0x100000
#define REGION_ACCESS_SIZE_MASK (REGION_ACCESS_SIZE_LIMIT - 1)
#endif /* _HW_H_ */
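
A short worked example of the region macros above (arithmetic only, using the definitions in this header):

/* Example: writing target address 0x00912345 through the diag window.
 *
 *   CPU_ADDR_MSB_REGION_VAL(0x00912345)
 *       = FIELD_GET(GENMASK(23, 20), 0x00912345) = 0x9
 *
 * so 0x9 is programmed into CPU_ADDR_MSB and the low 20 bits
 * (0x00912345 & REGION_ACCESS_SIZE_MASK = 0x12345) address within the
 * selected 1 MB region.  A transfer that crosses the 1 MB boundary must
 * be split, which is what ath10k_hw_diag_segment_msb_download() does.
 */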


@ -30,7 +30,6 @@
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"
@ -157,6 +156,22 @@ u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
return 0;
}
static int ath10k_mac_get_rate_hw_value(int bitrate)
{
int i;
u8 hw_value_prefix = 0;
if (ath10k_mac_bitrate_is_cck(bitrate))
hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
for (i = 0; i < sizeof(ath10k_rates); i++) {
if (ath10k_rates[i].bitrate == bitrate)
return hw_value_prefix | ath10k_rates[i].hw_value;
}
return -EINVAL;
}
static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
switch ((mcs_map >> (2 * nss)) & 0x3) {
@ -968,7 +983,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
if (time_left == 0)
return -ETIMEDOUT;
return 0;
return ar->last_wmi_vdev_start_status;
}
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
@ -5452,9 +5467,10 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
u8 rate;
int rateidx, ret = 0;
u8 rate, basic_rate_idx;
int rateidx, ret = 0, hw_rate_code;
enum nl80211_band band;
const struct ieee80211_supported_band *sband;
mutex_lock(&ar->conf_mutex);
@ -5660,6 +5676,30 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
mutex_unlock(&ar->conf_mutex);
return;
}
sband = ar->hw->wiphy->bands[def.chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
if (hw_rate_code < 0) {
ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
mutex_unlock(&ar->conf_mutex);
return;
}
vdev_param = ar->wmi.vdev_param->mgmt_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
}
mutex_unlock(&ar->conf_mutex);
}
@ -6216,6 +6256,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
@ -6244,6 +6285,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
GFP_KERNEL);
if (!arsta->tx_stats)
goto exit;
}
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
@ -6329,6 +6377,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
kfree(arsta->tx_stats);
if (sta->tdls) {
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
sta,
@ -6769,23 +6820,17 @@ static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
void ath10k_mac_wait_tx_complete(struct ath10k *ar)
{
struct ath10k *ar = hw->priv;
bool skip;
long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs
*/
if (drop)
return;
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_WEDGED)
goto skip;
return;
time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
@ -6804,8 +6849,18 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (time_left == 0 || skip)
ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
skip, ar->state, time_left);
}
skip:
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
if (drop)
return;
mutex_lock(&ar->conf_mutex);
ath10k_mac_wait_tx_complete(ar);
mutex_unlock(&ar->conf_mutex);
}
@ -8149,6 +8204,24 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
},
};
static const struct
ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
{
.limits = ath10k_10_4_if_limits,
.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
.max_interfaces = 16,
.num_different_channels = 1,
.beacon_int_infra_match = true,
.beacon_int_min_gcd = 100,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
#endif
},
};
static void ath10k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@ -8311,6 +8384,10 @@ int ath10k_mac_register(struct ath10k *ar)
void *channels;
int ret;
if (!is_valid_ether_addr(ar->mac_addr)) {
ath10k_warn(ar, "invalid MAC address; choosing random\n");
eth_random_addr(ar->mac_addr);
}
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
SET_IEEE80211_DEV(ar->hw, ar->dev);
@ -8465,6 +8542,10 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT);
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@ -8508,6 +8589,13 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10_4_if_comb);
if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
ar->wmi.svc_map)) {
ar->hw->wiphy->iface_combinations =
ath10k_10_4_bcn_int_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
}
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:


@ -82,6 +82,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
u16 peer_id,
u8 tid);
int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
void ath10k_mac_wait_tx_complete(struct ath10k *ar);
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)


@ -192,7 +192,7 @@ static struct ce_attr host_ce_config_wlan[] = {
/* CE7: ce_diag, the Diagnostic Window */
{
.flags = CE_ATTR_FLAGS,
.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
.src_nentries = 2,
.src_sz_max = DIAG_TRANSFER_LIMIT,
.dest_nentries = 2,
@ -870,6 +870,21 @@ static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
return val;
}
/* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
* Support to access target space below 1M for qca6174 and qca9377.
* If target space is below 1M, the bit[20] of converted CE addr is 0.
* Otherwise bit[20] of converted CE addr is 1.
*/
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
& 0x7ff) << 21;
val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
return val;
}
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
@ -931,6 +946,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
/* The address supplied by the caller is in the
* Target CPU virtual address space.
*
* In order to use this address with the diagnostic CE,
* convert it from Target CPU virtual address space
* to CE address space
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
remaining_bytes = nbytes;
ce_data = ce_data_base;
while (remaining_bytes) {
@ -942,16 +966,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
/* Request CE to send from Target(!) address to Host buffer */
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
*
* In order to use this address with the diagnostic CE,
* convert it from Target CPU virtual address space
* to CE address space
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
if (ret)
@ -960,8 +974,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@ -972,9 +988,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
(void **)&buf,
&completed_nbytes)
!= 0) {
mdelay(1);
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@ -1119,9 +1136,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
mdelay(1);
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@ -1132,9 +1150,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
(void **)&buf,
&completed_nbytes)
!= 0) {
mdelay(1);
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@ -1839,7 +1858,7 @@ int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
}
}
if (WARN_ON(!ul_set || !dl_set))
if (!ul_set || !dl_set)
return -ENOENT;
return 0;
@ -3482,7 +3501,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k *ar;
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
u32 chip_id;
struct ath10k_bus_params bus_params;
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
@ -3510,7 +3529,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
@ -3538,7 +3557,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = NULL;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@ -3618,19 +3637,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_free_irq;
}
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
goto err_free_irq;
}
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
pdev->device, bus_params.chip_id);
goto err_free_irq;
}
ret = ath10k_core_register(ar, chip_id);
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_free_irq;

View File

@ -207,7 +207,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define CDC_WAR_DATA_CE 4
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */
#define DIAG_ACCESS_CE_WAIT_US 50
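/* The diag read/write paths now busy-wait in DIAG_ACCESS_CE_WAIT_US steps
 * and give up once the accumulated wait exceeds DIAG_ACCESS_CE_TIMEOUT_US,
 * i.e. after roughly 10000 / 50 = 200 polls (still about 10 ms total),
 * instead of sleeping 1 ms per iteration as before.
 */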
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);

View File

@ -1277,4 +1277,19 @@ struct fw_rx_desc_base {
u8 info0;
} __packed;
#define FW_RX_DESC_FLAGS_FIRST_MSDU (1 << 0)
#define FW_RX_DESC_FLAGS_LAST_MSDU (1 << 1)
#define FW_RX_DESC_C3_FAILED (1 << 2)
#define FW_RX_DESC_C4_FAILED (1 << 3)
#define FW_RX_DESC_IPV6 (1 << 4)
#define FW_RX_DESC_TCP (1 << 5)
#define FW_RX_DESC_UDP (1 << 6)
struct fw_rx_desc_hl {
u8 info0;
u8 version;
u8 len;
u8 flags;
} __packed;
#endif /* _RX_DESC_H_ */

View File

@ -1941,7 +1941,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
struct ath10k_sdio *ar_sdio;
struct ath10k *ar;
enum ath10k_hw_rev hw_rev;
u32 chip_id, dev_id_base;
u32 dev_id_base;
struct ath10k_bus_params bus_params;
int ret, i;
/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
@ -2035,9 +2036,10 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with SDIO */
chip_id = 0;
ret = ath10k_core_register(ar, chip_id);
bus_params.chip_id = 0;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_free_wq;

View File

@ -62,6 +62,7 @@ static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ath10k_snoc_drv_priv drv_priv = {
.hw_rev = ATH10K_HW_WCN3990,
@ -171,7 +172,7 @@ static struct ce_attr host_ce_config_wlan[] = {
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath10k_snoc_htt_htc_rx_cb,
.recv_cb = ath10k_snoc_pktlog_rx_cb,
},
};
@ -436,6 +437,14 @@ static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
/* Called by lower (CE) layer when data is received from the Target.
* WCN3990 firmware uses a separate CE (CE11) to transfer pktlog data.
*/
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
@ -616,7 +625,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
}
}
if (WARN_ON(!ul_set || !dl_set))
if (!ul_set || !dl_set)
return -ENOENT;
return 0;
@ -722,14 +731,15 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
ath10k_snoc_irq_disable(ar);
ath10k_snoc_buffer_cleanup(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
ath10k_snoc_buffer_cleanup(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
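/* Buffers are now freed only after NAPI has been synchronized and disabled,
 * so the cleanup cannot race with rx processing; likewise napi_enable()
 * moves from hif_power_up into hif_start so that it pairs with the
 * napi_disable() in hif_stop.
 */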
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
napi_enable(&ar->napi);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@ -792,7 +802,6 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar)
goto err_wlan_enable;
}
napi_enable(&ar->napi);
return 0;
err_wlan_enable:
@ -1274,6 +1283,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
struct ath10k *ar;
int ret;
u32 i;
struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
if (!of_id) {
@ -1341,7 +1351,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
ret = ath10k_core_register(ar, drv_data->hw_rev);
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = drv_data->hw_rev;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_hw_power_off;

View File

@ -484,6 +484,10 @@ struct host_interest {
#define QCA99X0_BOARD_DATA_SZ 12288
#define QCA99X0_BOARD_EXT_DATA_SZ 0
/* Dual band extended board data */
#define QCA99X0_EXT_BOARD_DATA_SZ 2048
#define EXT_BOARD_ADDRESS_OFFSET 0x3000
#define QCA4019_BOARD_DATA_SZ 12064
#define QCA4019_BOARD_EXT_DATA_SZ 0

View File

@ -95,6 +95,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
if (ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
ath10k_report_offchan_tx(htt->ar, msdu);

View File

@ -983,7 +983,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
struct usb_device *dev = interface_to_usbdev(interface);
int ret, vendor_id, product_id;
enum ath10k_hw_rev hw_rev;
u32 chip_id;
struct ath10k_bus_params bus_params;
/* Assumption: All USB based chipsets (so far) are QCA9377 based.
* If there will be newer chipsets that does not use the hw reg
@ -1016,9 +1016,10 @@ static int ath10k_usb_probe(struct usb_interface *interface,
ar->id.vendor = vendor_id;
ar->id.device = product_id;
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with USB */
chip_id = 0;
ret = ath10k_core_register(ar, chip_id);
bus_params.chip_id = 0;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret);
goto err;

View File

@ -19,7 +19,6 @@
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "mac.h"
#include "wmi.h"
#include "wmi-ops.h"
#include "wmi-tlv.h"
@ -1569,7 +1568,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
if (ar->hw_params.num_peers)
cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
else
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
@ -1582,6 +1584,9 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
}
cfg->num_peer_keys = __cpu_to_le32(2);
if (ar->hw_params.num_peers)
cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
else
cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
cfg->tx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_chain_mask = __cpu_to_le32(0x7);

View File

@ -1307,7 +1307,8 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
.peer_sta_ps_statechg_enable =
WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
@ -2342,7 +2343,12 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_FROM_DEVICE);
info = IEEE80211_SKB_CB(msdu);
info->flags |= status;
if (status)
info->flags &= ~IEEE80211_TX_STAT_ACK;
else
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(ar->hw, msdu);
ret = 0;
@ -2482,7 +2488,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
ieee80211_rx(ar->hw, skb);
ieee80211_rx_ni(ar->hw, skb);
return 0;
}
@ -3242,18 +3249,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_vdev_start_ev_arg arg = {};
int ret;
u32 status;
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
ar->last_wmi_vdev_start_status = 0;
ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
return;
ar->last_wmi_vdev_start_status = ret;
goto out;
}
if (WARN_ON(__le32_to_cpu(arg.status)))
return;
status = __le32_to_cpu(arg.status);
if (WARN_ON_ONCE(status)) {
ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
status, (status == WMI_VDEV_START_CHAN_INVALID) ?
"chan-invalid" : "unknown");
/* Setup is done one way or another though, so we should still
* do the completion, so don't return here.
*/
ar->last_wmi_vdev_start_status = -EINVAL;
}
out:
complete(&ar->vdev_setup_done);
}
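/* Callers blocked on vdev_setup_done can now inspect
 * ar->last_wmi_vdev_start_status (0 on success, the parse error, or
 * -EINVAL for a firmware-reported failure) instead of the event being
 * dropped silently on error.
 */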
@ -4780,6 +4800,13 @@ ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
}
}
if (pream == -1) {
ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
pream_idx, __le32_to_cpu(ev->chan_freq));
tpc = 0;
goto out;
}
if (pream == 4)
tpc = min_t(u8, ev->rates_array[rate_idx],
ev->max_reg_allow_pow[ch]);
@ -5022,6 +5049,36 @@ ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
}
}
static void
ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_peer_sta_ps_state_chg_event *ev;
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
u8 peer_addr[ETH_ALEN];
lockdep_assert_held(&ar->data_lock);
ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
if (!sta) {
ath10k_warn(ar, "failed to find station entry %pM\n",
peer_addr);
goto exit;
}
arsta = (struct ath10k_sta *)sta->drv_priv;
arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
exit:
rcu_read_unlock();
}
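/* The cached arsta->peer_ps_state is what the per-station power save
 * state debugfs file added elsewhere in this series reports back to
 * user space.
 */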
void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@ -5455,6 +5512,7 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
arg.mac_addr,
__le32_to_cpu(arg.status));
if (is_zero_ether_addr(ar->mac_addr))
ether_addr_copy(ar->mac_addr, arg.mac_addr);
complete(&ar->wmi.unified_ready);
return 0;
@ -5951,6 +6009,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@ -6068,6 +6129,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
ath10k_wmi_event_dfs_status_check(ar, skb);
break;
case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;

View File

@ -203,6 +203,8 @@ enum wmi_service {
WMI_SERVICE_TPC_STATS_FINAL,
WMI_SERVICE_RESET_CHIP,
WMI_SERVICE_SPOOF_MAC_SUPPORT,
WMI_SERVICE_TX_DATA_ACK_RSSI,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
/* keep last */
WMI_SERVICE_MAX,
@ -350,6 +352,13 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
WMI_10_4_SERVICE_TPC_STATS_FINAL,
WMI_10_4_SERVICE_CFR_CAPTURE_SUPPORT,
WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
WMI_10_4_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_LEGACY,
WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT,
WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL,
WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
};
static inline char *wmi_service_name(int service_id)
@ -463,6 +472,8 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
SVCSTR(WMI_SERVICE_RESET_CHIP);
SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI);
SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT);
default:
return NULL;
}
@ -771,6 +782,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL,
WMI_SERVICE_TPC_STATS_FINAL, len);
SVCMAP(WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
WMI_SERVICE_TX_DATA_ACK_RSSI, len);
SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len);
}
#undef SVCMAP
@ -2924,6 +2939,7 @@ enum wmi_coex_version {
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
* @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable
* @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
@ -2939,6 +2955,7 @@ enum wmi_10_4_feature_mask {
WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10),
WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12),
WMI_10_4_TX_DATA_ACK_RSSI = BIT(16),
};
@ -4153,6 +4170,13 @@ enum wmi_tpc_pream_5ghz {
WMI_TPC_PREAM_5GHZ_HTCUP,
};
#define WMI_PEER_PS_STATE_DISABLED 2
struct wmi_peer_sta_ps_state_chg_event {
struct wmi_mac_addr peer_macaddr;
__le32 peer_ps_state;
} __packed;
struct wmi_pdev_chanlist_update_event {
/* number of channels */
__le32 num_chan;
@ -4958,10 +4982,15 @@ enum wmi_rate_preamble {
#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1)
#define ATH10K_HW_RATECODE(rate, nss, preamble) \
(((preamble) << 6) | ((nss) << 4) | (rate))
#define ATH10K_HW_AMPDU(flags) ((flags) & 0x1)
#define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3)
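/* Used by the extended per-sta tx statistics: e.g. flags = 0x5 decodes as
 * ATH10K_HW_AMPDU(flags) == 1 and ATH10K_HW_BA_FAIL(flags) == 2.
 */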
#define VHT_MCS_NUM 10
#define VHT_BW_NUM 4
#define VHT_NSS_NUM 4
#define ATH10K_VHT_MCS_NUM 10
#define ATH10K_BW_NUM 4
#define ATH10K_NSS_NUM 4
#define ATH10K_LEGACY_NUM 12
#define ATH10K_GI_NUM 2
#define ATH10K_HT_MCS_NUM 32
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@ -6642,11 +6671,17 @@ struct wmi_ch_info_ev_arg {
__le32 rx_frame_count;
};
/* Values from 10.4 firmware; other firmware branches may not use the same values. */
enum wmi_vdev_start_status {
WMI_VDEV_START_OK = 0,
WMI_VDEV_START_CHAN_INVALID,
};
struct wmi_vdev_start_ev_arg {
__le32 vdev_id;
__le32 req_id;
__le32 resp_type; /* %WMI_VDEV_RESP_ */
__le32 status;
__le32 status; /* See wmi_vdev_start_status enum above */
};
struct wmi_peer_kick_ev_arg {

View File

@ -374,6 +374,8 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto cleanup;
}
ath10k_mac_wait_tx_complete(ar);
ret = ath10k_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to start wow: %d\n", ret);

View File

@ -710,8 +710,8 @@ static bool check_device_tree(struct ath6kl *ar)
for_each_compatible_node(node, NULL, "atheros,ath6kl") {
board_id = of_get_property(node, board_id_prop, NULL);
if (board_id == NULL) {
ath6kl_warn("No \"%s\" property on %s node.\n",
board_id_prop, node->name);
ath6kl_warn("No \"%s\" property on %pOFn node.\n",
board_id_prop, node);
continue;
}
snprintf(board_filename, sizeof(board_filename),

View File

@ -1074,6 +1074,7 @@ struct ath_softc {
struct ath_spec_scan_priv spec_priv;
struct ieee80211_vif *tx99_vif;
struct sk_buff *tx99_skb;
bool tx99_state;
s16 tx99_power;

View File

@ -144,6 +144,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("BEACONS", rx_beacons);
RXS_ERR("FRAGS", rx_frags);
RXS_ERR("SPECTRAL", rx_spectral);
RXS_ERR("SPECTRAL SMPL GOOD", rx_spectral_sample_good);
RXS_ERR("SPECTRAL SMPL ERR", rx_spectral_sample_err);
RXS_ERR("CRC ERR", crc_err);
RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);

View File

@ -39,6 +39,8 @@
* @rx_beacons: No. of beacons received.
* @rx_frags: No. of rx-fragments received.
* @rx_spectral: No of spectral packets received.
* @rx_spectral_sample_good: No. of good spectral samples
* @rx_spectral_sample_err: No. of spectral samples with errors
*/
struct ath_rx_stats {
u32 rx_pkts_all;
@ -58,6 +60,8 @@ struct ath_rx_stats {
u32 rx_beacons;
u32 rx_frags;
u32 rx_spectral;
u32 rx_spectral_sample_good;
u32 rx_spectral_sample_err;
};
#ifdef CONFIG_ATH9K_COMMON_DEBUG

View File

@ -59,8 +59,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
max_index = spectral_max_index(mag_info->all_bins,
SPECTRAL_HT20_NUM_BINS);
max_index = spectral_max_index_ht20(mag_info->all_bins);
max_magnitude = spectral_max_magnitude(mag_info->all_bins);
max_exp = mag_info->max_exp & 0xf;
@ -72,7 +71,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
return -1;
if (sample[max_index] != (max_magnitude >> max_exp))
if ((sample[max_index] & 0xf8) != ((max_magnitude >> max_exp) & 0xf8))
return -1;
else
return 0;
@ -100,12 +99,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
lower_max_index = spectral_max_index(mag_info->lower_bins,
SPECTRAL_HT20_40_NUM_BINS);
lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
upper_max_index = spectral_max_index(mag_info->upper_bins,
SPECTRAL_HT20_40_NUM_BINS);
upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
max_exp = mag_info->max_exp & 0xf;
@ -117,19 +114,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
((upper_max_index < 1) || (lower_max_index < 1)))
return -1;
/* Some time hardware messes up the index and adds
* the index of the middle point (dc_pos). Try to fix it.
*/
if ((upper_max_index - dc_pos > 0) &&
(sample[upper_max_index] == (upper_mag >> max_exp)))
upper_max_index -= dc_pos;
if ((lower_max_index - dc_pos > 0) &&
(sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
lower_max_index -= dc_pos;
if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
(sample[lower_max_index] != (lower_mag >> max_exp)))
if (((sample[upper_max_index + dc_pos] & 0xf8) !=
((upper_mag >> max_exp) & 0xf8)) ||
((sample[lower_max_index] & 0xf8) !=
((lower_mag >> max_exp) & 0xf8)))
return -1;
else
return 0;
@ -169,8 +157,7 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
magnitude = spectral_max_magnitude(mag_info->all_bins);
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
max_index = spectral_max_index(mag_info->all_bins,
SPECTRAL_HT20_NUM_BINS);
max_index = spectral_max_index_ht20(mag_info->all_bins);
fft_sample_20.max_index = max_index;
bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
@ -188,7 +175,8 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
magnitude >> max_exp,
max_index);
if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
if ((fft_sample_20.data[max_index] & 0xf8) !=
((magnitude >> max_exp) & 0xf8)) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
@ -302,12 +290,10 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
lower_max_index = spectral_max_index(mag_info->lower_bins,
SPECTRAL_HT20_40_NUM_BINS);
lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
fft_sample_40.lower_max_index = lower_max_index;
upper_max_index = spectral_max_index(mag_info->upper_bins,
SPECTRAL_HT20_40_NUM_BINS);
upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
fft_sample_40.upper_max_index = upper_max_index;
lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
@ -331,29 +317,13 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
upper_mag >> max_exp,
upper_max_index);
/* Some time hardware messes up the index and adds
* the index of the middle point (dc_pos). Try to fix it.
*/
if ((upper_max_index - dc_pos > 0) &&
(fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
upper_max_index -= dc_pos;
fft_sample_40.upper_max_index = upper_max_index;
}
if ((lower_max_index - dc_pos > 0) &&
(fft_sample_40.data[lower_max_index - dc_pos] ==
(lower_mag >> max_exp))) {
lower_max_index -= dc_pos;
fft_sample_40.lower_max_index = lower_max_index;
}
/* Check if we got the expected magnitude values at
* the expected bins
*/
if ((fft_sample_40.data[upper_max_index + dc_pos]
!= (upper_mag >> max_exp)) ||
(fft_sample_40.data[lower_max_index]
!= (lower_mag >> max_exp))) {
if (((fft_sample_40.data[upper_max_index + dc_pos] & 0xf8)
!= ((upper_mag >> max_exp) & 0xf8)) ||
((fft_sample_40.data[lower_max_index] & 0xf8)
!= ((lower_mag >> max_exp) & 0xf8))) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
@ -411,7 +381,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
ath_dbg(common, SPECTRAL_SCAN,
"Calculated new upper max 0x%X at %i\n",
tmp_mag, i);
tmp_mag, fft_sample_40.upper_max_index);
} else
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
if (fft_sample_40.data[i] == (upper_mag >> max_exp))
@ -501,6 +471,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
struct ath_hw *ah = spec_priv->ah;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
struct ath_softc *sc = (struct ath_softc *)common->priv;
u8 num_bins, *vdata = (u8 *)hdr;
struct ath_radar_info *radar_info;
int len = rs->rs_datalen;
@ -649,9 +620,14 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
sample_buf, sample_len,
sample_bytes);
fft_handler(rs, spec_priv, sample_buf,
ret = fft_handler(rs, spec_priv, sample_buf,
tsf, freq, chan_type);
if (ret == 0)
RX_STAT_INC(rx_spectral_sample_good);
else
RX_STAT_INC(rx_spectral_sample_err);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
/* Mix the received bins to the /dev/random
@ -665,6 +641,11 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
ret = fft_handler(rs, spec_priv, sample_start,
tsf, freq, chan_type);
if (ret == 0)
RX_STAT_INC(rx_spectral_sample_good);
else
RX_STAT_INC(rx_spectral_sample_err);
/* Mix the received bins to the /dev/random
* pool
*/
@ -675,7 +656,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
* loop.
*/
if (len <= fft_len + 2)
break;
return 1;
sample_start = &vdata[i + 1];

View File

@ -145,6 +145,23 @@ static inline u8 spectral_max_index(u8 *bins, int num_bins)
return m;
}
static inline u8 spectral_max_index_ht40(u8 *bins)
{
u8 idx;
idx = spectral_max_index(bins, SPECTRAL_HT20_40_NUM_BINS);
/* Positive values and zero start at the beginning
 * of the data field.
 */
return idx % (SPECTRAL_HT20_40_NUM_BINS / 2);
}
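/* spectral_max_index_ht40() folds the reported index back into one half of
 * the HT40 bins, so e.g. a raw index of SPECTRAL_HT20_40_NUM_BINS / 2 + 6
 * maps to bin 6 of the corresponding (upper or lower) half.
 */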
static inline u8 spectral_max_index_ht20(u8 *bins)
{
return spectral_max_index(bins, SPECTRAL_HT20_NUM_BINS);
}
/* return the bitmap weight from the all/upper/lower bins */
static inline u8 spectral_bitmap_weight(u8 *bins)
{

View File

@ -990,19 +990,6 @@ static int read_file_dump_nfcal(struct seq_file *file, void *data)
return 0;
}
static int open_file_dump_nfcal(struct inode *inode, struct file *f)
{
return single_open(f, read_file_dump_nfcal, inode->i_private);
}
static const struct file_operations fops_dump_nfcal = {
.read = seq_read,
.open = open_file_dump_nfcal,
.owner = THIS_MODULE,
.llseek = seq_lseek,
.release = single_release,
};
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)

View File

@ -286,9 +286,25 @@ static ssize_t read_airtime(struct file *file, char __user *user_buf,
return retval;
}
static ssize_t
write_airtime_reset_stub(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath_node *an = file->private_data;
struct ath_airtime_stats *astats;
int i;
astats = &an->airtime_stats;
astats->rx_airtime = 0;
astats->tx_airtime = 0;
for (i = 0; i < 4; i++)
an->airtime_deficit[i] = ATH_AIRTIME_QUANTUM;
return count;
}
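/* Any write to the per-station "airtime" debugfs file (now mode 0644)
 * resets the accumulated rx/tx airtime and restores each AC's deficit to
 * ATH_AIRTIME_QUANTUM, e.g. (path illustrative):
 *
 *   echo 1 > /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/stations/<mac>/airtime
 */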
static const struct file_operations fops_airtime = {
.read = read_airtime,
.write = write_airtime_reset_stub,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@ -304,5 +320,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr);
debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv);
debugfs_create_file("airtime", 0444, dir, an, &fops_airtime);
debugfs_create_file("airtime", 0644, dir, an, &fops_airtime);
}

View File

@ -1251,8 +1251,13 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_node *an = &avp->mcast_node;
if (IS_ENABLED(CONFIG_ATH9K_TX99))
if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
}
sc->tx99_vif = vif;
}
mutex_lock(&sc->mutex);
@ -1337,6 +1342,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath9k_p2p_remove_vif(sc, vif);
sc->cur_chan->nvifs--;
sc->tx99_vif = NULL;
if (!ath9k_is_chanctx_enabled())
list_del(&avp->list);

View File

@ -54,6 +54,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct sk_buff *skb;
struct ath_vif *avp;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
@ -71,11 +72,17 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
if (sc->tx99_vif) {
avp = (struct ath_vif *) sc->tx99_vif->drv_priv;
hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
}
tx_info = IEEE80211_SKB_CB(skb);
memset(tx_info, 0, sizeof(*tx_info));
rate = &tx_info->control.rates[0];
tx_info->band = sc->cur_chan->chandef.chan->band;
tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
tx_info->control.vif = sc->tx99_vif;
rate->count = 1;
if (ah->curchan && IS_CHAN_HT(ah->curchan)) {
rate->flags |= IEEE80211_TX_RC_MCS;

View File

@ -2973,7 +2973,7 @@ int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
return -EINVAL;
}
ath_set_rates(NULL, NULL, bf);
ath_set_rates(sc->tx99_vif, NULL, bf);
ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

View File

@ -190,7 +190,7 @@ static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
unsigned long start, data_comp_to;
unsigned long data_comp_to;
wil_dbg_pm(wil, "suspend keep radio on\n");
@ -232,7 +232,6 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
}
/* Wait for completion of the pending RX packets */
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil->txrx_ops.is_rx_idle(wil)) {

View File

@ -455,7 +455,7 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
*/
static inline bool wil_cid_valid(u8 cid)
{
return (cid >= 0 && cid < WIL6210_MAX_CID);
return cid < WIL6210_MAX_CID;
}
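/* cid is a u8, so the removed "cid >= 0" half of the check was always true
 * (and could trigger a compiler warning); the upper-bound check alone is
 * sufficient.
 */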
struct wil6210_mbox_ring {

View File

@ -1177,7 +1177,7 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wil_sta_info *sta;
int cid;
u8 cid;
struct key_params params;
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);