Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.9. Major changes:

ath10k

* add nl80211 testmode support for 10.4 firmware
* hide kernel addresses from logs using %pK format specifier
* implement NAPI support
* enable peer stats by default

ath9k

* use ieee80211_tx_status_noskb where possible

wil6210

* extract firmware capabilities from the firmware file

ath6kl

* enable firmware crash dumps on the AR6004

ath-current is also merged to fix a conflict in ath10k.
Kalle Valo 2016-09-14 19:34:50 +03:00
commit af1afc2957
47 changed files with 2125 additions and 549 deletions
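
The largest functional change in this pull is the ath10k NAPI conversion. The sketch below condenses the PCI/AHB and HTT rx hunks that follow (names are taken straight from the diff; firmware-crash handling, the CE pending-interrupt recheck and the quota bookkeeping are omitted), so it shows the shape of the wiring rather than the literal patch:

/* NAPI poll: all HTT tx/rx completion work now runs here instead of the
 * old txrx tasklet (sketch only, simplified from the hunks below).
 */
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete(ctx);
		ath10k_pci_enable_legacy_irq(ar);
		ath10k_pci_irq_msi_fw_unmask(ar);
	}

	return done;
}

void ath10k_pci_init_napi(struct ath10k *ar)
{
	/* ar->napi_dev is a dummy netdev set up in ath10k_core_create() */
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
		       ATH10K_NAPI_BUDGET);
}

/* The interrupt handler now only masks the source and defers to NAPI:
 *	ath10k_pci_disable_and_clear_legacy_irq(ar);
 *	ath10k_pci_irq_msi_fw_mask(ar);
 *	napi_schedule(&ar->napi);
 * napi_enable()/napi_disable() bracket hif_power_up() and hif_stop().
 */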

View File

@ -462,13 +462,13 @@ static void ath10k_ahb_halt_chip(struct ath10k *ar)
static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
{
struct ath10k *ar = arg;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
ath10k_pci_disable_and_clear_legacy_irq(ar);
tasklet_schedule(&ar_pci->intr_tq);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
@ -577,7 +577,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
ar_ahb->mem, ar_ahb->mem_len,
ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
return 0;
@ -717,6 +717,9 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
synchronize_irq(ar_ahb->irq);
ath10k_pci_flush(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
}
static int ath10k_ahb_hif_power_up(struct ath10k *ar)
@ -748,6 +751,7 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce_deinit;
}
napi_enable(&ar->napi);
return 0;
@ -831,7 +835,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
goto err_resource_deinit;
}
ath10k_pci_init_irq_tasklets(ar);
ath10k_pci_init_napi(ar);
ret = ath10k_ahb_request_irq_legacy(ar);
if (ret)

View File

@ -221,7 +221,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@ -287,7 +287,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
"bmi fast download address 0x%x buffer 0x%pK length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);

View File

@ -840,7 +840,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot init ce src ring id %d entries %d base_addr %p\n",
"boot init ce src ring id %d entries %d base_addr %pK\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
@ -874,7 +874,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
"boot ce dest ring id %d entries %d base_addr %pK\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;

View File

@ -60,7 +60,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
@ -68,6 +67,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA9887_HW_1_0_VERSION,
@ -79,7 +79,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 2116,
.fw = {
.dir = QCA9887_HW_1_0_FW_DIR,
@ -87,6 +86,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -104,6 +104,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -114,7 +115,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
@ -122,6 +122,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_3_0_VERSION,
@ -132,7 +133,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
@ -140,6 +140,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_3_2_VERSION,
@ -150,7 +151,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
/* uses same binaries as hw3.0 */
@ -159,6 +159,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@ -171,7 +172,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
@ -182,6 +182,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@ -194,7 +196,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
@ -205,6 +206,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@ -216,7 +219,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.continuous_frag_desc = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 3,
.rx_chain_mask = 3,
.max_spatial_stream = 2,
@ -227,6 +229,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@ -244,6 +248,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@ -261,6 +266,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@ -274,7 +280,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 125000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0x3,
.rx_chain_mask = 0x3,
.max_spatial_stream = 2,
@ -285,6 +290,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
},
};
@ -304,6 +311,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@ -699,7 +707,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
if (!ar->running_fw->fw_file.otp_data ||
!ar->running_fw->fw_file.otp_len) {
ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
ar->running_fw->fw_file.otp_data,
ar->running_fw->fw_file.otp_len);
return 0;
@ -745,7 +753,7 @@ static int ath10k_download_fw(struct ath10k *ar)
data = ar->running_fw->fw_file.firmware_data;
data_len = ar->running_fw->fw_file.firmware_len;
ret = ath10k_swap_code_seg_configure(ar);
ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
if (ret) {
ath10k_err(ar, "failed to configure fw code swap: %d\n",
ret);
@ -753,7 +761,7 @@ static int ath10k_download_fw(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot uploading firmware image %p len %d\n",
"boot uploading firmware image %pK len %d\n",
data, data_len);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
@ -787,7 +795,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (!IS_ERR(ar->pre_cal_file))
release_firmware(ar->pre_cal_file);
ath10k_swap_code_seg_release(ar);
ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
ar->normal_mode_fw.fw_file.otp_data = NULL;
ar->normal_mode_fw.fw_file.otp_len = 0;
@ -1497,14 +1505,14 @@ static void ath10k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw);
ath10k_drain_tx(ar);
complete_all(&ar->scan.started);
complete_all(&ar->scan.completed);
complete_all(&ar->scan.on_channel);
complete_all(&ar->offchan_tx_completed);
complete_all(&ar->install_key_done);
complete_all(&ar->vdev_setup_done);
complete_all(&ar->thermal.wmi_sync);
complete_all(&ar->bss_survey_done);
complete(&ar->scan.started);
complete(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->offchan_tx_completed);
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
complete(&ar->thermal.wmi_sync);
complete(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@ -1705,6 +1713,55 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return 0;
}
static int ath10k_core_reset_rx_filter(struct ath10k *ar)
{
int ret;
int vdev_id;
int vdev_type;
int vdev_subtype;
const u8 *vdev_addr;
vdev_id = 0;
vdev_type = WMI_VDEV_TYPE_STA;
vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
vdev_addr = ar->mac_addr;
ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
vdev_addr);
if (ret) {
ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
return ret;
}
ret = ath10k_wmi_vdev_delete(ar, vdev_id);
if (ret) {
ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
return ret;
}
/* WMI and HTT may use separate HIF pipes and are not guaranteed to be
* serialized properly implicitly.
*
* Moreover (most) WMI commands have no explicit acknowledgements. It is
* possible to infer it implicitly by poking firmware with echo
* command - getting a reply means all preceding commands have been
* (mostly) processed.
*
* In case of vdev create/delete this is sufficient.
*
* Without this it's possible to end up with a race when HTT Rx ring is
* started before vdev create/delete hack is complete allowing a short
* window of opportunity to receive (and Tx ACK) a bunch of frames.
*/
ret = ath10k_wmi_barrier(ar);
if (ret) {
ath10k_err(ar, "failed to ping firmware: %d\n", ret);
return ret;
}
return 0;
}
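
ath10k_wmi_barrier() itself lives in wmi.c and is not among the hunks shown here. Judging from the pieces this series does add (the ar->wmi.barrier completion, the gen_echo op and the ath10k_wmi_echo() helper), it plausibly reduces to "send an echo, wait for the echo event"; the sketch below is a guess along those lines, and the echo cookie and timeout are placeholders rather than the driver's real constants:

/* Hypothetical sketch of the barrier used above; locking (conf_mutex)
 * omitted, cookie and timeout are arbitrary placeholders.
 */
static int ath10k_wmi_barrier_sketch(struct ath10k *ar)
{
	int ret;

	reinit_completion(&ar->wmi.barrier);

	ret = ath10k_wmi_echo(ar, 0x1234 /* arbitrary cookie */);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&ar->wmi.barrier, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}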
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw)
{
@ -1872,6 +1929,25 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
/* Some firmware revisions do not properly set up hardware rx filter
* registers.
*
* A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
* is filled with 0s instead of 1s allowing HW to respond with ACKs to
* any frame that matches MAC_PCU_RX_FILTER, which is also
* misconfigured to accept anything.
*
* The ADDR1 is programmed using an internal firmware structure field and
* can't be (easily/sanely) reached from the driver explicitly. It is
* possible to implicitly make it correct by creating a dummy vdev and
* then deleting it.
*/
status = ath10k_core_reset_rx_filter(ar);
if (status) {
ath10k_err(ar, "failed to reset rx filter: %d\n", status);
goto err_hif_stop;
}
/* If firmware indicates Full Rx Reorder support it must be used in a
* slightly different manner. Let HTT code know.
*/
@ -2031,7 +2107,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
ret = ath10k_swap_code_seg_init(ar);
ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
ret);
@ -2072,6 +2148,9 @@ static void ath10k_core_register_work(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, register_work);
int status;
/* peer stats are enabled by default */
set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err(ar, "could not probe fw (%d)\n", status);
@ -2249,6 +2328,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
init_dummy_netdev(&ar->napi_dev);
ret = ath10k_debug_create(ar);
if (ret)
goto err_free_aux_wq;

View File

@ -65,6 +65,10 @@
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
/* NAPI poll budget */
#define ATH10K_NAPI_BUDGET 64
#define ATH10K_NAPI_QUOTA_LIMIT 60
struct ath10k;
enum ath10k_bus {
@ -142,6 +146,7 @@ struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
struct completion barrier;
wait_queue_head_t tx_credits_wq;
DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
struct wmi_cmd_map *cmd;
@ -440,7 +445,7 @@ struct ath10k_debug {
struct completion tpc_complete;
/* protected by conf_mutex */
u32 fw_dbglog_mask;
u64 fw_dbglog_mask;
u32 fw_dbglog_level;
u32 pktlog_filter;
u32 reg_addr;
@ -551,6 +556,13 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
/* Older firmware with HTT delivers incorrect tx status for null func
* frames to the driver, but this is fixed in 10.2 and 10.4 firmware versions.
* Also this workaround results in reporting of incorrect null func
* status for 10.4. This flag is used to skip the workaround.
*/
ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@ -663,6 +675,15 @@ struct ath10k_fw_file {
const void *codeswap_data;
size_t codeswap_len;
/* The original idea of struct ath10k_fw_file was that it only
* contains struct firmware and pointers to various parts (actual
* firmware binary, otp, metadata etc) of the file. This seg_info
* is actually created separately, but as this is used similarly to
* the other firmware components it's more convenient to have it
* here.
*/
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
};
struct ath10k_fw_components {
@ -715,53 +736,7 @@ struct ath10k {
struct ath10k_htc htc;
struct ath10k_htt htt;
struct ath10k_hw_params {
u32 id;
u16 dev_id;
const char *name;
u32 patch_load_addr;
int uart_pin;
u32 otp_exe_param;
/* Type of hw cycle counter wraparound logic, for more info
* refer enum ath10k_hw_cc_wraparound_type.
*/
enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
/* Some of chip expects fragment descriptor to be continuous
* memory for any TX operation. Set continuous_frag_desc flag
* for the hardware which have such requirement.
*/
bool continuous_frag_desc;
/* CCK hardware rate table mapping for the newer chipsets
* like QCA99X0, QCA4019 got revised. The CCK h/w rate values
* are in a proper order with respect to the rate/preamble
*/
bool cck_rate_map_rev2;
u32 channel_counters_freq_hz;
/* Mgmt tx descriptors threshold for limiting probe response
* frames.
*/
u32 max_probe_resp_desc_thres;
/* The padding bytes's location is different on various chips */
enum ath10k_hw_4addr_pad hw_4addr_pad;
u32 tx_chain_mask;
u32 rx_chain_mask;
u32 max_spatial_stream;
u32 cal_data_len;
struct ath10k_hw_params_fw {
const char *dir;
const char *board;
size_t board_size;
size_t board_ext_size;
} fw;
} hw_params;
struct ath10k_hw_params hw_params;
/* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
struct ath10k_fw_components normal_mode_fw;
@ -774,10 +749,6 @@ struct ath10k {
const struct firmware *pre_cal_file;
const struct firmware *cal_file;
struct {
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
} swap;
struct {
u32 vendor;
u32 device;
@ -936,6 +907,10 @@ struct ath10k {
struct ath10k_thermal thermal;
struct ath10k_wow wow;
/* NAPI */
struct net_device napi_dev;
struct napi_struct napi;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};

View File

@ -1228,9 +1228,9 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
unsigned int len;
char buf[64];
char buf[96];
len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
@ -1242,15 +1242,16 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
int ret;
char buf[64];
unsigned int log_level, mask;
char buf[96];
unsigned int log_level;
u64 mask;
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
ret = sscanf(buf, "%x %u", &mask, &log_level);
ret = sscanf(buf, "%llx %u", &mask, &log_level);
if (!ret)
return -EINVAL;

View File

@ -44,7 +44,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
return skb;
}
@ -62,7 +62,7 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
{
struct ath10k *ar = ep->htc->ar;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
@ -404,7 +404,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
goto out;
}
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);

View File

@ -1665,7 +1665,6 @@ struct ath10k_htt {
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
struct tasklet_struct txrx_compl_task;
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
struct sk_buff_head tx_fetch_ind_q;
@ -1798,5 +1797,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt,
struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
#endif

View File

@ -34,7 +34,6 @@
#define HTT_RX_RING_REFILL_RESCHED_MS 5
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
@ -226,7 +225,6 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->txrx_compl_task);
skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
@ -520,9 +518,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
skb_queue_head_init(&htt->tx_fetch_ind_q);
atomic_set(&htt->num_mpdus_ready, 0);
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@ -931,7 +926,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@ -958,7 +953,7 @@ static void ath10k_process_rx(struct ath10k *ar,
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
trace_ath10k_rx_payload(ar, skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
@ -1056,9 +1051,11 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
int l3_pad_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
@ -1072,19 +1069,12 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
*/
/* pull decapped header and copy SA & DA */
if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
/* The QCA99X0 4 address mode pad 2 bytes at the
* beginning of MSDU
*/
hdr = (struct ieee80211_hdr *)(msdu->data + 2);
/* The skb length need be extended 2 as the 2 bytes at the tail
* be excluded due to the padding
*/
skb_put(msdu, 2);
} else {
hdr = (struct ieee80211_hdr *)(msdu->data);
}
rxd = (void *)msdu->data - sizeof(*rxd);
l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
@ -1151,6 +1141,8 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
void *rfc1042;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
int l3_pad_bytes;
struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@ -1161,6 +1153,11 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
if (WARN_ON_ONCE(!rfc1042))
return;
rxd = (void *)msdu->data - sizeof(*rxd);
l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, l3_pad_bytes);
/* pull decapped header and copy SA & DA */
eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
@ -1191,6 +1188,8 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
@ -1198,7 +1197,11 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
* [payload]
*/
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
rxd = (void *)msdu->data - sizeof(*rxd);
l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@ -1525,9 +1528,9 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
static struct ieee80211_rx_status rx_status;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
int ret;
int ret, num_msdus;
__skb_queue_head_init(&amsdu);
@ -1549,13 +1552,14 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return ret;
}
ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
num_msdus = skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
return 0;
return num_msdus;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@ -1579,15 +1583,6 @@ static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
mpdu_count += mpdu_ranges[i].mpdu_count;
atomic_add(mpdu_count, &htt->num_mpdus_ready);
tasklet_schedule(&htt->txrx_compl_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
{
atomic_inc(&htt->num_mpdus_ready);
tasklet_schedule(&htt->txrx_compl_task);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
@ -1772,14 +1767,15 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
RX_FLAG_MMIC_STRIPPED;
}
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
struct sk_buff_head *list)
static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
int num_msdu = 0;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have Rx descriptor. Instead they have
@ -1819,10 +1815,12 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
num_msdu++;
}
return num_msdu;
}
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (void *)skb->data;
@ -1835,12 +1833,12 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
u8 tid;
bool offload;
bool frag;
int ret;
int ret, num_msdus = 0;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused)
return;
return -EIO;
skb_pull(skb, sizeof(resp->hdr));
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
@ -1859,7 +1857,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
return;
return -EINVAL;
}
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
@ -1870,14 +1868,14 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
return;
return -EIO;
}
/* Offloaded frames are very different and need to be handled
* separately.
*/
if (offload)
ath10k_htt_rx_h_rx_offload(ar, &list);
num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
@ -1890,6 +1888,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
@ -1902,9 +1901,10 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
htt->rx_confused = true;
__skb_queue_purge(&list);
return;
return -EIO;
}
}
return num_msdus;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@ -2267,7 +2267,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
tasklet_schedule(&htt->txrx_compl_task);
break;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
@ -2284,7 +2283,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
ath10k_htt_rx_frag_handler(htt);
atomic_inc(&htt->num_mpdus_ready);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
@ -2320,8 +2319,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@ -2347,7 +2345,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
tasklet_schedule(&htt->txrx_compl_task);
break;
}
case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
@ -2376,27 +2373,77 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
struct ath10k *ar = htt->ar;
struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {};
struct sk_buff_head rx_ind_q;
struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
int num_mpdus;
int quota = 0, done, num_rx_msdus;
bool resched_napi = false;
__skb_queue_head_init(&rx_ind_q);
__skb_queue_head_init(&tx_ind_q);
spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
/* Since in-ord-ind can deliver more than 1 A-MSDU in a single event,
* process it first to utilize the full available quota.
*/
while (quota < budget) {
if (skb_queue_empty(&htt->rx_in_ord_compl_q))
break;
spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
if (!skb) {
resched_napi = true;
goto exit;
}
spin_lock_bh(&htt->rx_ring.lock);
num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
if (num_rx_msdus < 0) {
resched_napi = true;
goto exit;
}
dev_kfree_skb_any(skb);
if (num_rx_msdus > 0)
quota += num_rx_msdus;
if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
!skb_queue_empty(&htt->rx_in_ord_compl_q)) {
resched_napi = true;
goto exit;
}
}
while (quota < budget) {
/* no more data to receive */
if (!atomic_read(&htt->num_mpdus_ready))
break;
num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
if (num_rx_msdus < 0) {
resched_napi = true;
goto exit;
}
quota += num_rx_msdus;
atomic_dec(&htt->num_mpdus_ready);
if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
atomic_read(&htt->num_mpdus_ready)) {
resched_napi = true;
goto exit;
}
}
/* From NAPI documentation:
* The napi poll() function may also process TX completions, in which
* case if it processes the entire TX ring then it should count that
* work as the rest of the budget.
*/
if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
quota = budget;
/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
* From kfifo_get() documentation:
@ -2406,27 +2453,24 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
while (kfifo_get(&htt->txdone_fifo, &tx_done))
ath10k_txrx_tx_unref(htt, &tx_done);
ath10k_mac_tx_push_pending(ar);
spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
while ((skb = __skb_dequeue(&tx_ind_q))) {
ath10k_htt_rx_tx_fetch_ind(ar, skb);
dev_kfree_skb_any(skb);
}
num_mpdus = atomic_read(&htt->num_mpdus_ready);
while (num_mpdus) {
if (ath10k_htt_rx_handle_amsdu(htt))
break;
num_mpdus--;
atomic_dec(&htt->num_mpdus_ready);
}
while ((skb = __skb_dequeue(&rx_ind_q))) {
spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
dev_kfree_skb_any(skb);
}
exit:
ath10k_htt_rx_msdu_buff_replenish(htt);
/* In case of rx failure or more data to read, report budget
* to reschedule NAPI poll
*/
done = resched_napi ? budget : quota;
return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

View File

@ -390,8 +390,6 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
int size;
tasklet_kill(&htt->txrx_compl_task);
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);

View File

@ -219,3 +219,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
survey->time = CCNT_TO_MSEC(ar, cc);
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
const struct ath10k_hw_ops qca988x_ops = {
};
static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
{
return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
RX_MSDU_END_INFO1_L3_HDR_PAD);
}
const struct ath10k_hw_ops qca99x0_ops = {
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
};

View File

@ -338,11 +338,6 @@ enum ath10k_hw_rate_rev2_cck {
ATH10K_HW_RATE_REV2_CCK_SP_11M,
};
enum ath10k_hw_4addr_pad {
ATH10K_HW_4ADDR_PAD_AFTER,
ATH10K_HW_4ADDR_PAD_BEFORE,
};
enum ath10k_hw_cc_wraparound_type {
ATH10K_HW_CC_WRAP_DISABLED = 0,
@ -363,6 +358,77 @@ enum ath10k_hw_cc_wraparound_type {
ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
};
struct ath10k_hw_params {
u32 id;
u16 dev_id;
const char *name;
u32 patch_load_addr;
int uart_pin;
u32 otp_exe_param;
/* Type of hw cycle counter wraparound logic, for more info
* refer enum ath10k_hw_cc_wraparound_type.
*/
enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
/* Some of chip expects fragment descriptor to be continuous
* memory for any TX operation. Set continuous_frag_desc flag
* for the hardware which have such requirement.
*/
bool continuous_frag_desc;
/* CCK hardware rate table mapping for the newer chipsets
* like QCA99X0, QCA4019 got revised. The CCK h/w rate values
* are in a proper order with respect to the rate/preamble
*/
bool cck_rate_map_rev2;
u32 channel_counters_freq_hz;
/* Mgmt tx descriptors threshold for limiting probe response
* frames.
*/
u32 max_probe_resp_desc_thres;
u32 tx_chain_mask;
u32 rx_chain_mask;
u32 max_spatial_stream;
u32 cal_data_len;
struct ath10k_hw_params_fw {
const char *dir;
const char *board;
size_t board_size;
size_t board_ext_size;
} fw;
/* qca99x0 family chips deliver broadcast/multicast management
* frames encrypted and expect software to do the decryption.
*/
bool sw_decrypt_mcast_mgmt;
const struct ath10k_hw_ops *hw_ops;
};
struct htt_rx_desc;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
};
extern const struct ath10k_hw_ops qca988x_ops;
extern const struct ath10k_hw_ops qca99x0_ops;
static inline int
ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
struct htt_rx_desc *rxd)
{
if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
return 0;
}
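
For reference, this is exactly how the rx decap hunks earlier in the diff consume the new helper (condensed from ath10k_htt_rx_h_undecap_nwifi(); the eth and snap variants follow the same pattern):

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

On QCA988x-family chips the qca988x_ops table leaves the callback unset, so the helper returns 0 and the rx path is unchanged; the QCA99X0 family reads the pad length from the rx descriptor instead of relying on the removed hw_4addr_pad enum.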
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2

View File

@ -824,7 +824,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
*/
for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
if (ar->peer_map[i] == peer) {
ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
peer->addr, peer, i);
ar->peer_map[i] = NULL;
}
@ -3255,6 +3255,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
if (ar->htt.target_version_major < 3 &&
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
ar->running_fw->fw_file.fw_features) &&
!test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
ar->running_fw->fw_file.fw_features))
return ATH10K_HW_TXRX_MGMT;
@ -3524,7 +3526,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
skb);
skb_queue_tail(&ar->offchan_tx_queue, skb);
@ -3586,7 +3588,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
@ -3643,7 +3645,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
skb);
if (!peer && tmp_peer_created) {
@ -3777,7 +3779,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
enum ath10k_hw_txrx_mode txmode;
enum ath10k_mac_tx_path txpath;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
size_t skb_len;
bool is_mgmt, is_presp;
int ret;
spin_lock_bh(&ar->htt.tx_lock);
@ -3801,6 +3805,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
skb_len = skb->len;
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
if (is_mgmt) {
hdr = (struct ieee80211_hdr *)skb->data;
is_presp = ieee80211_is_probe_resp(hdr->frame_control);
spin_lock_bh(&ar->htt.tx_lock);
ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
if (ret) {
ath10k_htt_tx_dec_pending(htt);
spin_unlock_bh(&ar->htt.tx_lock);
return ret;
}
spin_unlock_bh(&ar->htt.tx_lock);
}
ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
if (unlikely(ret)) {
@ -3808,6 +3828,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_lock_bh(&ar->htt.tx_lock);
ath10k_htt_tx_dec_pending(htt);
if (is_mgmt)
ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&ar->htt.tx_lock);
return ret;
@ -3894,7 +3916,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
ar->scan.roc_freq = 0;
ath10k_offchan_tx_purge(ar);
cancel_delayed_work(&ar->scan.timeout);
complete_all(&ar->scan.completed);
complete(&ar->scan.completed);
break;
}
}
@ -4100,13 +4122,29 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_txq *artxq = (void *)txq->drv_priv;
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
int ret = 0;
int max = 16;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
list_add_tail(&artxq->list, &ar->txqs);
f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
list_del_init(&f_artxq->list);
while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, f_txq);
if (ret)
break;
}
if (ret != -ENOENT)
list_add_tail(&f_artxq->list, &ar->txqs);
spin_unlock_bh(&ar->txqs_lock);
ath10k_mac_tx_push_pending(ar);
ath10k_htt_tx_txq_update(hw, f_txq);
ath10k_htt_tx_txq_update(hw, txq);
}
@ -5186,7 +5224,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
ret = ath10k_monitor_recalc(ar);
if (ret)
ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
}
@ -5984,8 +6022,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
@ -6001,7 +6039,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;
if (peer->sta == sta) {
ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;
@ -6538,7 +6576,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
goto exit;
}
ath10k_mac_update_bss_chan_survey(ar, survey->channel);
ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
@ -7134,7 +7172,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx add freq %hu width %d ptr %p\n",
"mac chanctx add freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@ -7158,7 +7196,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx remove freq %hu width %d ptr %p\n",
"mac chanctx remove freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@ -7223,7 +7261,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx change freq %hu width %d ptr %p changed %x\n",
"mac chanctx change freq %hu width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@ -7281,7 +7319,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx assign ptr %p vdev_id %i\n",
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
if (WARN_ON(arvif->is_started)) {
@ -7342,7 +7380,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx unassign ptr %p vdev_id %i\n",
"mac chanctx unassign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);

View File

@ -1506,12 +1506,10 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}
void ath10k_pci_kill_tasklet(struct ath10k *ar)
static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
tasklet_kill(&ar_pci->intr_tq);
del_timer_sync(&ar_pci->rx_post_retry);
}
@ -1570,7 +1568,7 @@ void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
ul_pipe, dl_pipe);
}
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;
@ -1693,14 +1691,12 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct sk_buff *skb;
int i;
ar = pci_pipe->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
@ -1753,7 +1749,7 @@ void ath10k_pci_ce_deinit(struct ath10k *ar)
void ath10k_pci_flush(struct ath10k *ar)
{
ath10k_pci_kill_tasklet(ar);
ath10k_pci_rx_retry_sync(ar);
ath10k_pci_buffer_cleanup(ar);
}
@ -1780,6 +1776,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
@ -2533,6 +2531,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
napi_enable(&ar->napi);
return 0;
@ -2725,7 +2724,7 @@ static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
return 0;
err_free:
kfree(data);
kfree(caldata);
return -EINVAL;
}
@ -2772,35 +2771,53 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
ath10k_pci_disable_and_clear_legacy_irq(ar);
}
tasklet_schedule(&ar_pci->intr_tq);
ath10k_pci_disable_and_clear_legacy_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
static void ath10k_pci_tasklet(unsigned long data)
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
struct ath10k *ar = (struct ath10k *)data;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
int done = 0;
if (ath10k_pci_has_fw_crashed(ar)) {
ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
return;
napi_complete(ctx);
return done;
}
ath10k_ce_per_engine_service_any(ar);
/* Re-enable legacy irq that was disabled in the irq handler */
if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
done = ath10k_htt_txrx_compl_task(ar, budget);
if (done < budget) {
napi_complete(ctx);
/* In case of MSI, it is possible that interrupts are received
* while NAPI poll is in progress. So pending interrupts that are
* received after processing all copy engine pipes by NAPI poll
* will not be handled again. This causes a failure to complete
* the boot sequence on x86 platforms. So before enabling
* interrupts it is safer to check for pending interrupts for
* immediate servicing.
*/
if (CE_INTERRUPT_SUMMARY(ar)) {
napi_reschedule(ctx);
goto out;
}
ath10k_pci_enable_legacy_irq(ar);
ath10k_pci_irq_msi_fw_unmask(ar);
}
out:
return done;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
@ -2858,11 +2875,10 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
free_irq(ar_pci->pdev->irq, ar);
}
void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
ATH10K_NAPI_BUDGET);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@ -2870,7 +2886,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ath10k_pci_init_irq_tasklets(ar);
ath10k_pci_init_napi(ar);
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
ath10k_info(ar, "limiting irq mode to: %d\n",
@ -3062,7 +3078,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_master;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
return 0;
err_master:
@ -3131,7 +3147,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
void ath10k_pci_release_resource(struct ath10k *ar)
{
ath10k_pci_kill_tasklet(ar);
ath10k_pci_rx_retry_sync(ar);
netif_napi_del(&ar->napi);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
}
@ -3162,7 +3179,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
break;
case QCA9887_1_0_DEVICE_ID:
dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
hw_rev = ATH10K_HW_QCA9887;
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
@ -3298,7 +3314,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
err_free_irq:
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_rx_retry_sync(ar);
err_deinit_irq:
ath10k_pci_deinit_irq(ar);

View File

@ -177,8 +177,6 @@ struct ath10k_pci {
/* Operating interrupt mode */
enum ath10k_pci_irq_mode oper_irq_mode;
struct tasklet_struct intr_tq;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
/* Copy Engine used for Diagnostic Accesses */
@ -294,8 +292,7 @@ void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_rx_replenish_retry(unsigned long ptr);
void ath10k_pci_ce_deinit(struct ath10k *ar);
void ath10k_pci_init_irq_tasklets(struct ath10k *ar);
void ath10k_pci_kill_tasklet(struct ath10k *ar);
void ath10k_pci_init_napi(struct ath10k *ar);
int ath10k_pci_init_pipes(struct ath10k *ar);
int ath10k_pci_init_config(struct ath10k *ar);
void ath10k_pci_rx_post(struct ath10k *ar);
@ -303,6 +300,7 @@ void ath10k_pci_flush(struct ath10k *ar);
void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
bool ath10k_pci_irq_pending(struct ath10k *ar);
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
int ath10k_pci_wait_for_target_init(struct ath10k *ar);
int ath10k_pci_setup_resource(struct ath10k *ar);
void ath10k_pci_release_resource(struct ath10k *ar);

View File

@ -134,17 +134,18 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
return seg_info;
}
int ath10k_swap_code_seg_configure(struct ath10k *ar)
int ath10k_swap_code_seg_configure(struct ath10k *ar,
const struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
if (!ar->swap.firmware_swap_code_seg_info)
if (!fw_file->firmware_swap_code_seg_info)
return 0;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
seg_info = ar->swap.firmware_swap_code_seg_info;
seg_info = fw_file->firmware_swap_code_seg_info;
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
@ -158,28 +159,29 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar)
return 0;
}
void ath10k_swap_code_seg_release(struct ath10k *ar)
void ath10k_swap_code_seg_release(struct ath10k *ar,
struct ath10k_fw_file *fw_file)
{
ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
/* FIXME: these two assignments look to be in the wrong place! Shouldn't
* they be in ath10k_core_free_firmware_files() like the rest?
*/
ar->normal_mode_fw.fw_file.codeswap_data = NULL;
ar->normal_mode_fw.fw_file.codeswap_len = 0;
fw_file->codeswap_data = NULL;
fw_file->codeswap_len = 0;
ar->swap.firmware_swap_code_seg_info = NULL;
fw_file->firmware_swap_code_seg_info = NULL;
}
int ath10k_swap_code_seg_init(struct ath10k *ar)
int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
const void *codeswap_data;
size_t codeswap_len;
codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
codeswap_data = fw_file->codeswap_data;
codeswap_len = fw_file->codeswap_len;
if (!codeswap_len || !codeswap_data)
return 0;
@ -200,7 +202,7 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
return ret;
}
ar->swap.firmware_swap_code_seg_info = seg_info;
fw_file->firmware_swap_code_seg_info = seg_info;
return 0;
}

View File

@ -23,6 +23,8 @@
/* Currently only one swap segment is supported */
#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
struct ath10k_fw_file;
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
@ -58,8 +60,11 @@ struct ath10k_swap_code_seg_info {
dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
};
int ath10k_swap_code_seg_configure(struct ath10k *ar);
void ath10k_swap_code_seg_release(struct ath10k *ar);
int ath10k_swap_code_seg_init(struct ath10k *ar);
int ath10k_swap_code_seg_configure(struct ath10k *ar,
const struct ath10k_fw_file *fw_file);
void ath10k_swap_code_seg_release(struct ath10k *ar,
struct ath10k_fw_file *fw_file);
int ath10k_swap_code_seg_init(struct ath10k *ar,
struct ath10k_fw_file *fw_file);
#endif

View File

@ -23,6 +23,7 @@
#include "wmi.h"
#include "hif.h"
#include "hw.h"
#include "core.h"
#include "testmode_i.h"
@ -45,7 +46,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb %p skb->len %d\n",
"testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@ -240,6 +241,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
ret = ath10k_swap_code_seg_init(ar,
&ar->testmode.utf_mode_fw.fw_file);
if (ret) {
ath10k_warn(ar,
"failed to init utf code swap segment: %d\n",
ret);
goto err_release_utf_mode_fw;
}
}
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
@ -279,6 +292,11 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
ath10k_hif_power_down(ar);
err_release_utf_mode_fw:
if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
ar->testmode.utf_mode_fw.fw_file.codeswap_len)
ath10k_swap_code_seg_release(ar,
&ar->testmode.utf_mode_fw.fw_file);
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
@ -301,6 +319,11 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
ar->testmode.utf_mode_fw.fw_file.codeswap_len)
ath10k_swap_code_seg_release(ar,
&ar->testmode.utf_mode_fw.fw_file);
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
@ -360,7 +383,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);

View File

@ -192,7 +192,7 @@ int ath10k_thermal_register(struct ath10k *ar)
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */
if (!config_enabled(CONFIG_HWMON))
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,

View File

@ -44,7 +44,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
ath10k_mac_tx_push_pending(ar);
return 0;
}

View File

@ -51,6 +51,8 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_echo_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@ -123,7 +125,7 @@ struct wmi_ops {
enum wmi_force_fw_hang_type type,
u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
@ -194,6 +196,7 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@ -349,6 +352,16 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_echo_ev_arg *arg)
{
if (!ar->wmi.ops->pull_echo_ev)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
@ -932,7 +945,7 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
}
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
struct sk_buff *skb;
@ -1382,4 +1395,20 @@ ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
wmi->cmd->pdev_bss_chan_info_request_cmdid);
}
static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
struct ath10k_wmi *wmi = &ar->wmi;
struct sk_buff *skb;
if (!wmi->ops->gen_echo)
return -EOPNOTSUPP;
skb = wmi->ops->gen_echo(ar, value);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}
#endif

View File

@ -1223,6 +1223,33 @@ ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_echo_ev_arg *arg)
{
const void **tb;
const struct wmi_echo_event *ev;
int ret;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
if (!ev) {
kfree(tb);
return -EPROTO;
}
arg->value = ev->value;
kfree(tb);
return 0;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@ -2441,7 +2468,7 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level) {
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
@ -3081,6 +3108,34 @@ ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
{
struct wmi_echo_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->value = cpu_to_le32(value);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
return skb;
}
/****************/
/* TLV mappings */
/****************/
@ -3429,6 +3484,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
@ -3485,6 +3541,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {

View File

@ -29,6 +29,9 @@
#include "p2p.h"
#include "hw.h"
#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
.init_cmdid = WMI_INIT_CMDID,
@ -1874,7 +1877,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@ -2240,6 +2243,29 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
return 0;
}
static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
struct ieee80211_hdr *hdr)
{
if (!ieee80211_has_protected(hdr->frame_control))
return false;
/* FW delivers WEP Shared Auth frame with Protected Bit set and
* encrypted payload. However in case of PMF it delivers decrypted
* frames with Protected Bit set.
*/
if (ieee80211_is_auth(hdr->frame_control))
return false;
/* qca99x0 based FW delivers broadcast or multicast management frames
* (ex: group privacy action frames in mesh) as encrypted payload.
*/
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
ar->hw_params.sw_decrypt_mcast_mgmt)
return false;
return true;
}
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@ -2326,11 +2352,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_handle_wep_reauth(ar, skb, status);
/* FW delivers WEP Shared Auth frame with Protected Bit set and
* encrypted payload. However in case of PMF it delivers decrypted
* frames with Protected Bit set. */
if (ieee80211_has_protected(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control)) {
if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
status->flag |= RX_FLAG_DECRYPTED;
if (!ieee80211_is_action(hdr->frame_control) &&
@ -2347,7 +2369,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@ -2495,7 +2517,21 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
struct wmi_echo_ev_arg arg = {};
int ret;
ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse echo: %d\n", ret);
return;
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event echo value 0x%08x\n",
le32_to_cpu(arg.value));
if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
complete(&ar->wmi.barrier);
}
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
@ -3527,7 +3563,6 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to map beacon: %d\n",
ret);
dev_kfree_skb_any(bcn);
ret = -EIO;
goto skip;
}
@ -4792,6 +4827,17 @@ static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_echo_ev_arg *arg)
{
struct wmi_echo_event *ev = (void *)skb->data;
arg->value = ev->value;
return 0;
}
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@ -5124,6 +5170,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_2_event_id id;
bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@ -5133,6 +5180,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
consumed = ath10k_tm_event_wmi(ar, id, skb);
/* Ready event must be handled normally also in UTF mode so that we
* know the UTF firmware has booted; other events are just bypassed
* to testmode.
*/
if (consumed && id != WMI_10_2_READY_EVENTID) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi testmode consumed 0x%x\n", id);
goto out;
}
switch (id) {
case WMI_10_2_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@ -5248,6 +5307,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_4_event_id id;
bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@ -5257,6 +5317,18 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
consumed = ath10k_tm_event_wmi(ar, id, skb);
/* Ready event must be handled normally also in UTF mode so that we
* know the UTF firmware has booted; other events are just bypassed
* to testmode.
*/
if (consumed && id != WMI_10_4_READY_EVENTID) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi testmode consumed 0x%x\n", id);
goto out;
}
switch (id) {
case WMI_10_4_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@ -5306,6 +5378,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
case WMI_10_4_WDS_PEER_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
@ -6863,7 +6936,7 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level)
{
struct wmi_dbglog_cfg_cmd *cmd;
@ -6900,6 +6973,44 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
return skb;
}
static struct sk_buff *
ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level)
{
struct wmi_10_4_dbglog_cfg_cmd *cmd;
struct sk_buff *skb;
u32 cfg;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
if (module_enable) {
cfg = SM(log_level,
ATH10K_DBGLOG_CFG_LOG_LVL);
} else {
/* set back defaults, all modules with WARN level */
cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
ATH10K_DBGLOG_CFG_LOG_LVL);
module_enable = ~0;
}
cmd->module_enable = __cpu_to_le64(module_enable);
cmd->module_valid = __cpu_to_le64(~0);
cmd->config_enable = __cpu_to_le32(cfg);
cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
__le64_to_cpu(cmd->module_enable),
__le64_to_cpu(cmd->module_valid),
__le32_to_cpu(cmd->config_enable),
__le32_to_cpu(cmd->config_valid));
return skb;
}
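
For illustration only (not part of this diff): a minimal sketch of a call site that drives the widened 64-bit module bitmap through the ath10k_wmi_dbglog_cfg() wrapper whose signature changes above; the ~0ULL bitmap and the helper name are made-up examples.

/* Hedged example call site, assuming a caller that wants every firmware
 * module logged at WARN level on a 10.4 device; ~0ULL fills the 64-bit
 * module_enable bitmap introduced in this patch.
 */
static int example_enable_fw_dbglog(struct ath10k *ar)
{
	int ret;

	ret = ath10k_wmi_dbglog_cfg(ar, ~0ULL, ATH10K_DBGLOG_LEVEL_WARN);
	if (ret)
		ath10k_warn(ar, "failed to configure fw dbglog: %d\n", ret);

	return ret;
}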
static struct sk_buff *
ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
@ -7649,6 +7760,48 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
return skb;
}
static struct sk_buff *
ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
{
struct wmi_echo_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
cmd = (struct wmi_echo_cmd *)skb->data;
cmd->value = cpu_to_le32(value);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi echo value 0x%08x\n", value);
return skb;
}
int
ath10k_wmi_barrier(struct ath10k *ar)
{
int ret;
int time_left;
spin_lock_bh(&ar->data_lock);
reinit_completion(&ar->wmi.barrier);
spin_unlock_bh(&ar->data_lock);
ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
if (ret) {
ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
return ret;
}
time_left = wait_for_completion_timeout(&ar->wmi.barrier,
ATH10K_WMI_BARRIER_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
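
For illustration only (not part of this diff): a sketch of how a caller could use the new barrier to make sure previously queued WMI commands have been processed by firmware before continuing. The echo event handler above completes ar->wmi.barrier when the ATH10K_WMI_BARRIER_ECHO_ID value comes back; the helper name here is hypothetical.

/* Hedged example call site: flush outstanding WMI traffic, then proceed. */
static int example_wmi_flush(struct ath10k *ar)
{
	int ret;

	ret = ath10k_wmi_barrier(ar);
	if (ret == -ETIMEDOUT)
		ath10k_warn(ar, "firmware did not echo barrier in time\n");
	else if (ret)
		ath10k_warn(ar, "failed to submit wmi barrier: %d\n", ret);

	return ret;
}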
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@ -7665,6 +7818,7 @@ static const struct wmi_ops wmi_ops = {
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7709,6 +7863,7 @@ static const struct wmi_ops wmi_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@ -7738,6 +7893,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7777,6 +7933,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@ -7796,6 +7953,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
.gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@ -7807,6 +7965,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7862,6 +8021,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
.gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@ -7873,6 +8033,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7968,7 +8129,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
@ -7980,10 +8141,12 @@ static const struct wmi_ops wmi_10_4_ops = {
.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
/* shared with 10.2 */
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
.gen_echo = ath10k_wmi_op_gen_echo,
};
int ath10k_wmi_attach(struct ath10k *ar)
@ -8036,6 +8199,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_completion(&ar->wmi.barrier);
INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);

View File

@ -180,6 +180,7 @@ enum wmi_service {
WMI_SERVICE_MESH_NON_11S,
WMI_SERVICE_PEER_STATS,
WMI_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_PULL,
WMI_SERVICE_TX_MODE_DYNAMIC,
@ -305,6 +306,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_10_4_SERVICE_PEER_STATS,
WMI_10_4_SERVICE_MESH_11S,
WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_MESH_NON_11S);
SVCSTR(WMI_SERVICE_PEER_STATS);
SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PEER_STATS, len);
SVCMAP(WMI_10_4_SERVICE_MESH_11S,
WMI_SERVICE_MESH_11S, len);
SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
@ -6169,6 +6174,20 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
struct wmi_10_4_dbglog_cfg_cmd {
/* bitmask to hold mod id config */
__le64 module_enable;
/* see ATH10K_DBGLOG_CFG_ */
__le32 config_enable;
/* mask of module id bits to be changed */
__le64 module_valid;
/* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
__le32 config_valid;
} __packed;
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@ -6296,6 +6315,10 @@ struct wmi_roam_ev_arg {
__le32 rssi;
};
struct wmi_echo_ev_arg {
__le32 value;
};
struct wmi_pdev_temperature_event {
/* temperature value in degrees Celsius */
__le32 temperature;
@ -6624,5 +6647,6 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
char *buf);
int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
int ath10k_wmi_barrier(struct ath10k *ar);
#endif /* _WMI_H_ */

View File

@ -1449,14 +1449,14 @@ static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
return -EIO;
if (test_bit(CONNECTED, &vif->flags)) {
ar->tx_pwr = 0;
ar->tx_pwr = 255;
if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
return -EIO;
}
wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 255,
5 * HZ);
if (signal_pending(current)) {

View File

@ -64,7 +64,7 @@ int ath6kl_hif_rw_comp_handler(void *context, int status)
}
EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
#define REG_DUMP_COUNT_AR6003 60
#define REGISTER_DUMP_COUNT 60
#define REGISTER_DUMP_LEN_MAX 60
static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
@ -73,9 +73,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
u32 i, address, regdump_addr = 0;
int ret;
if (ar->target_type != TARGET_TYPE_AR6003)
return;
/* the reg dump pointer is copied to the host interest area */
address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
address = TARG_VTOP(ar->target_type, address);
@ -95,7 +92,7 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
/* fetch register dump data */
ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
REGISTER_DUMP_COUNT * (sizeof(u32)));
if (ret) {
ath6kl_warn("failed to get register dump: %d\n", ret);
return;
@ -105,9 +102,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
ar->wiphy->fw_version);
BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
BUILD_BUG_ON(REGISTER_DUMP_COUNT % 4);
for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
for (i = 0; i < REGISTER_DUMP_COUNT; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
i,
le32_to_cpu(regdump_val[i]),

View File

@ -260,8 +260,8 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
int cur_bin;
int upper, lower, cur_vit_mask;
int i;
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_m[123] = {0};
int8_t mask_p[123] = {0};
int8_t mask_amt;
int tmp_mask;
static const int pilot_mask_reg[4] = {
@ -274,9 +274,6 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };
memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@ -302,7 +299,7 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
upper = bin + 120;
lower = bin - 120;
for (i = 0; i < 123; i++) {
for (i = 0; i < ARRAY_SIZE(mask_m); i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);

View File

@ -3252,7 +3252,8 @@ static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
int i;
for (i = 0; i < mdata_size / 2; i++, data++)
ath9k_hw_nvram_read(ah, i, data);
if (!ath9k_hw_nvram_read(ah, i, data))
return -EIO;
return 0;
}
@ -3282,7 +3283,8 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
if (ath9k_hw_use_flash(ah)) {
u8 txrx;
ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
if (ar9300_eeprom_restore_flash(ah, mptr, mdata_size))
return -EIO;
/* check if eeprom contains valid data */
eep = (struct ar9300_eeprom *) mptr;

View File

@ -22,7 +22,7 @@
#ifdef CONFIG_MAC80211_LEDS
void ath_fill_led_pin(struct ath_softc *sc)
static void ath_fill_led_pin(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;

View File

@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
return -EINVAL;
}
ath9k_gpio_cap_init(ah);
if (AR_SREV_9485(ah) ||
AR_SREV_9285(ah) ||
AR_SREV_9330(ah) ||
@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
ath9k_gpio_cap_init(ah);
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
else

View File

@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (!ath_complete_reset(sc, false))
ah->reset_power_on = false;
if (ah->led_pin >= 0)
if (ah->led_pin >= 0) {
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 1 : 0);
ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
}
/*
* Reset key cache to sane defaults (all entries cleared) instead of
@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
if (ah->led_pin >= 0)
if (ah->led_pin >= 0) {
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 0 : 1);
ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
}
ath_prepare_reset(sc);
@ -919,7 +924,7 @@ static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
} else {
if (iter_data->primary_beacon_vif->type != NL80211_IFTYPE_AP &&
vif->type == NL80211_IFTYPE_AP)
iter_data->primary_beacon_vif = vif;
iter_data->primary_beacon_vif = vif;
}
iter_data->beacons = true;
@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
bool changed = (iter_data.primary_sta != ctx->primary_sta);
if (iter_data.primary_sta) {
iter_data.primary_beacon_vif = iter_data.primary_sta;
iter_data.beacons = true;
ath9k_set_assoc_state(sc, iter_data.primary_sta,
changed);
@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
ret = ath9k_sta_add(hw, vif, sta);
ath_dbg(common, CONFIG,
"Add station: %pM\n", sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST) {
ret = ath9k_sta_remove(hw, vif, sta);
ath_dbg(common, CONFIG,
"Remove station: %pM\n", sta->addr);

View File

@ -50,9 +50,11 @@ static u16 bits_per_symbol[][2] = {
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq);
int tx_flags, struct ath_txq *txq,
struct ieee80211_sta *sta);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
@ -77,6 +79,22 @@ enum {
/* Aggregation logic */
/*********************/
static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->status.status_driver_data[0];
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
ieee80211_tx_status(hw, skb);
return;
}
if (sta)
ieee80211_tx_status_noskb(hw, sta, info);
dev_kfree_skb(skb);
}
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
__acquires(&txq->axq_lock)
{
@ -92,6 +110,7 @@ void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
struct ieee80211_hw *hw = sc->hw;
struct sk_buff_head q;
struct sk_buff *skb;
@ -100,7 +119,7 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
spin_unlock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&q)))
ieee80211_tx_status(sc->hw, skb);
ath_tx_status(hw, skb);
}
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
@ -253,7 +272,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
list_add_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}
if (sendbar) {
@ -318,12 +337,12 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
continue;
}
list_add_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}
}
@ -426,15 +445,14 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
struct ieee80211_sta *sta,
struct ath_atx_tid *tid,
struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
struct ieee80211_sta *sta;
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
@ -460,12 +478,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
for (i = 0; i < ts->ts_rateindex; i++)
retries += rates[i].count;
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (!sta) {
rcu_read_unlock();
INIT_LIST_HEAD(&bf_head);
while (bf) {
bf_next = bf->bf_next;
@ -473,7 +486,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_state.stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);
bf = bf_next;
}
@ -481,7 +494,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, skb);
seq_first = tid->seq_start;
isba = ts->ts_flags & ATH9K_TX_BA;
@ -583,7 +595,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ts);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
!txfail);
} else {
if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
@ -604,7 +616,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, ts, 0);
&bf_head, NULL, ts,
0);
bar_index = max_t(int, bar_index,
ATH_BA_INDEX(seq_first, seqno));
break;
@ -648,8 +661,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
}
rcu_read_unlock();
if (needreset)
ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
@ -664,7 +675,11 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *info;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct ath_atx_tid *tid = NULL;
bool txok, flush;
txok = !(ts->ts_status & ATH9K_TXERR_MASK);
@ -677,6 +692,16 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
ts->ts_rateindex);
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
}
if (!bf_isampdu(bf)) {
if (!flush) {
info = IEEE80211_SKB_CB(bf->bf_mpdu);
@ -685,9 +710,9 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
}
ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);
if (!flush)
ath_txq_schedule(sc, txq);
@ -923,7 +948,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
list_add(&bf->list, &bf_head);
__skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
@ -1832,6 +1857,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
rcu_read_lock();
ath_txq_lock(sc, txq);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@ -1850,6 +1876,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
ath_drain_txq_list(sc, txq, &txq->axq_q);
ath_txq_unlock_complete(sc, txq);
rcu_read_unlock();
}
bool ath_drain_all_txq(struct ath_softc *sc)
@ -2472,7 +2499,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq)
int tx_flags, struct ath_txq *txq,
struct ieee80211_sta *sta)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@ -2492,15 +2520,17 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
padpos = ieee80211_hdrlen(hdr->frame_control);
padsize = padpos & 3;
if (padsize && skb->len>padpos+padsize) {
/*
* Remove MAC header padding before giving the frame back to
* mac80211.
*/
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
padpos = ieee80211_hdrlen(hdr->frame_control);
padsize = padpos & 3;
if (padsize && skb->len>padpos+padsize) {
/*
* Remove MAC header padding before giving the frame back to
* mac80211.
*/
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
}
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@ -2515,12 +2545,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
__skb_queue_tail(&txq->complete_q, skb);
ath_txq_skb_done(sc, txq, skb);
tx_info->status.status_driver_data[0] = sta;
__skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
@ -2548,7 +2580,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
complete(&sc->paprd_complete);
} else {
ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
ath_tx_complete(sc, skb, tx_flags, txq);
ath_tx_complete(sc, skb, tx_flags, txq, sta);
}
skip_tx_complete:
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@ -2700,10 +2732,12 @@ void ath_tx_tasklet(struct ath_softc *sc)
u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
int i;
rcu_read_lock();
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
ath_tx_processq(sc, &sc->tx.txq[i]);
}
rcu_read_unlock();
}
void ath_tx_edma_tasklet(struct ath_softc *sc)
@ -2717,6 +2751,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
struct list_head *fifo_list;
int status;
rcu_read_lock();
for (;;) {
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
@ -2787,6 +2822,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
ath_txq_unlock_complete(sc, txq);
}
rcu_read_unlock();
}
/*****************/

View File

@ -670,6 +670,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
ar->readlen = outlen;
spin_unlock_bh(&ar->cmd_lock);
reinit_completion(&ar->cmd_wait);
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
@ -778,10 +779,7 @@ void carl9170_usb_stop(struct ar9170 *ar)
spin_lock_bh(&ar->cmd_lock);
ar->readlen = 0;
spin_unlock_bh(&ar->cmd_lock);
complete_all(&ar->cmd_wait);
/* This is required to prevent an early completion on _start */
reinit_completion(&ar->cmd_wait);
complete(&ar->cmd_wait);
/*
* Note:

View File

@ -354,10 +354,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
mutex_unlock(&wil->p2p_wdev_mutex);
return -EAGAIN;
}
mutex_unlock(&wil->p2p_wdev_mutex);
/* check we are client side */
switch (wdev->iftype) {
@ -760,14 +763,11 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
return rc;
}
static struct wil_tid_crypto_rx_single *
wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
enum wmi_key_usage key_usage, const u8 *mac_addr)
static struct wil_sta_info *
wil_find_sta_by_key_usage(struct wil6210_priv *wil,
enum wmi_key_usage key_usage, const u8 *mac_addr)
{
int cid = -EINVAL;
int tid = 0;
struct wil_sta_info *s;
struct wil_tid_crypto_rx *c;
if (key_usage == WMI_KEY_USE_TX_GROUP)
return NULL; /* not needed */
@ -778,18 +778,72 @@ wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
else if (key_usage == WMI_KEY_USE_RX_GROUP)
cid = wil_find_cid_by_idx(wil, 0);
if (cid < 0) {
wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
key_usage_str[key_usage], key_index);
wil_err(wil, "No CID for %pM %s\n", mac_addr,
key_usage_str[key_usage]);
return ERR_PTR(cid);
}
s = &wil->sta[cid];
if (key_usage == WMI_KEY_USE_PAIRWISE)
c = &s->tid_crypto_rx[tid];
else
c = &s->group_crypto_rx;
return &wil->sta[cid];
}
return &c->key_id[key_index];
static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
struct wil_sta_info *cs,
struct key_params *params)
{
struct wil_tid_crypto_rx_single *cc;
int tid;
if (!cs)
return;
switch (key_usage) {
case WMI_KEY_USE_PAIRWISE:
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
cc = &cs->tid_crypto_rx[tid].key_id[key_index];
if (params->seq)
memcpy(cc->pn, params->seq,
IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
}
break;
case WMI_KEY_USE_RX_GROUP:
cc = &cs->group_crypto_rx.key_id[key_index];
if (params->seq)
memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
break;
default:
break;
}
}
static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
struct wil_sta_info *cs)
{
struct wil_tid_crypto_rx_single *cc;
int tid;
if (!cs)
return;
switch (key_usage) {
case WMI_KEY_USE_PAIRWISE:
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
cc = &cs->tid_crypto_rx[tid].key_id[key_index];
cc->key_set = false;
}
break;
case WMI_KEY_USE_RX_GROUP:
cc = &cs->group_crypto_rx.key_id[key_index];
cc->key_set = false;
break;
default:
break;
}
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
@ -801,24 +855,26 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
key_index,
key_usage,
mac_addr);
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
mac_addr);
if (!params) {
wil_err(wil, "NULL params\n");
return -EINVAL;
}
wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
if (IS_ERR(cc)) {
if (IS_ERR(cs)) {
wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
__func__, mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
if (cc)
cc->key_set = false;
wil_del_rx_key(key_index, key_usage, cs);
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
@ -831,13 +887,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
params->key, key_usage);
if ((rc == 0) && cc) {
if (params->seq)
memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
}
if (!rc)
wil_set_crypto_rx(key_index, key_usage, cs, params);
return rc;
}
@ -849,20 +900,18 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
key_index,
key_usage,
mac_addr);
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
mac_addr);
wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
key_usage_str[key_usage], key_index);
if (IS_ERR(cc))
if (IS_ERR(cs))
wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
mac_addr, key_usage_str[key_usage], key_index);
if (!IS_ERR_OR_NULL(cc))
cc->key_set = false;
if (!IS_ERR_OR_NULL(cs))
wil_del_rx_key(key_index, key_usage, cs);
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
@ -1363,23 +1412,16 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
u8 started;
struct wil_p2p_info *p2p = &wil->p2p;
if (!p2p->p2p_dev_started)
return;
wil_dbg_misc(wil, "%s: entered\n", __func__);
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
if (started && wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
};
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
wil->radio_wdev = wil->wdev;
}
wil_p2p_stop_radio_operations(wil);
p2p->p2p_dev_started = 0;
mutex_unlock(&wil->mutex);
wil->p2p.p2p_dev_started = 0;
}
static struct cfg80211_ops wil_cfg80211_ops = {
@ -1464,14 +1506,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev)
set_wiphy_dev(wdev->wiphy, dev);
wil_wiphy_init(wdev->wiphy);
rc = wiphy_register(wdev->wiphy);
if (rc < 0)
goto out_failed_reg;
return wdev;
out_failed_reg:
wiphy_free(wdev->wiphy);
out:
kfree(wdev);
@ -1487,7 +1523,6 @@ void wil_wdev_free(struct wil6210_priv *wil)
if (!wdev)
return;
wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
}
@ -1498,11 +1533,11 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
mutex_lock(&wil->p2p_wdev_mutex);
p2p_wdev = wil->p2p_wdev;
wil->p2p_wdev = NULL;
wil->radio_wdev = wil_to_wdev(wil);
mutex_unlock(&wil->p2p_wdev_mutex);
if (p2p_wdev) {
wil->p2p_wdev = NULL;
wil->radio_wdev = wil_to_wdev(wil);
cfg80211_unregister_wdev(p2p_wdev);
kfree(p2p_wdev);
}
mutex_unlock(&wil->p2p_wdev_mutex);
}

View File

@ -1553,6 +1553,56 @@ static const struct file_operations fops_led_blink_time = {
.open = simple_open,
};
/*---------FW capabilities------------*/
static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
wil->fw_capabilities);
return 0;
}
static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_fw_capabilities_debugfs_show,
inode->i_private);
}
static const struct file_operations fops_fw_capabilities = {
.open = wil_fw_capabilities_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
};
/*---------FW version------------*/
static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
if (wil->fw_version[0])
seq_printf(s, "%s\n", wil->fw_version);
else
seq_puts(s, "N/A\n");
return 0;
}
static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_fw_version_debugfs_show,
inode->i_private);
}
static const struct file_operations fops_fw_version = {
.open = wil_fw_version_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
};
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@ -1603,6 +1653,8 @@ static const struct {
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
{"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
{"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
{"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
{"fw_version", S_IRUGO, &fops_fw_version},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@ -1643,7 +1695,6 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(privacy, S_IRUGO, doff_u32),
WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -58,6 +58,15 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
u8 data[0]; /* free-form data [data_size], see above */
} __packed;
/* FW capabilities encoded inside a comment record */
#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
/* identifies capabilities record */
__le32 magic;
/* capabilities (variable size), see enum wmi_fw_capability */
u8 capabilities[0];
};
/* perform action
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
*/
@ -93,6 +102,9 @@ struct wil_fw_record_verify { /* type == wil_fw_verify */
/* file header
* First record of every file
*/
/* the FW version prefix in the comment */
#define WIL_FW_VERSION_PREFIX "FW version: "
#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1)
struct wil_fw_record_file_header {
__le32 signature; /* Wilocity signature */
__le32 reserved;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -118,6 +118,12 @@ static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
return (int)dlen;
}
static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
size_t size)
{
return 0;
}
static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
size_t size)
{
@ -126,6 +132,27 @@ static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
return 0;
}
static int
fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_capabilities *rec = data;
size_t capa_size;
if (size < sizeof(*rec) ||
le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
return 0;
capa_size = size - offsetof(struct wil_fw_record_capabilities,
capabilities);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
memcpy(wil->fw_capabilities, rec->capabilities,
min(sizeof(wil->fw_capabilities), capa_size));
wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
rec->capabilities, capa_size, false);
return 0;
}
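
For illustration only (not part of this diff): once the capabilities record has been copied into wil->fw_capabilities, individual features can be gated with test_bit(). WMI_FW_CAPABILITY_FTM is used below as an assumed member of enum wmi_fw_capability; the helper name is hypothetical.

/* Hedged example: gate a feature on a firmware capability bit. */
static bool example_fw_supports_ftm(struct wil6210_priv *wil)
{
	/* WMI_FW_CAPABILITY_FTM is an assumed capability index */
	return test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities);
}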
static int fw_handle_data(struct wil6210_priv *wil, const void *data,
size_t size)
{
@ -196,6 +223,13 @@ static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
sizeof(d->comment), true);
if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
WIL_FW_VERSION_PREFIX_LEN))
memcpy(wil->fw_version,
d->comment + WIL_FW_VERSION_PREFIX_LEN,
min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
sizeof(wil->fw_version) - 1));
return 0;
}
@ -383,42 +417,51 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
static const struct {
int type;
int (*handler)(struct wil6210_priv *wil, const void *data, size_t size);
int (*load_handler)(struct wil6210_priv *wil, const void *data,
size_t size);
int (*parse_handler)(struct wil6210_priv *wil, const void *data,
size_t size);
} wil_fw_handlers[] = {
{wil_fw_type_comment, fw_handle_comment},
{wil_fw_type_data, fw_handle_data},
{wil_fw_type_fill, fw_handle_fill},
{wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
{wil_fw_type_data, fw_handle_data, fw_ignore_section},
{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
/* wil_fw_type_action */
/* wil_fw_type_verify */
{wil_fw_type_file_header, fw_handle_file_header},
{wil_fw_type_direct_write, fw_handle_direct_write},
{wil_fw_type_gateway_data, fw_handle_gateway_data},
{wil_fw_type_gateway_data4, fw_handle_gateway_data4},
{wil_fw_type_file_header, fw_handle_file_header,
fw_handle_file_header},
{wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
{wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
{wil_fw_type_gateway_data4, fw_handle_gateway_data4,
fw_ignore_section},
};
static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
const void *data, size_t size)
const void *data, size_t size, bool load)
{
int i;
for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) {
for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
if (wil_fw_handlers[i].type == type)
return wil_fw_handlers[i].handler(wil, data, size);
}
return load ?
wil_fw_handlers[i].load_handler(
wil, data, size) :
wil_fw_handlers[i].parse_handler(
wil, data, size);
wil_err_fw(wil, "unknown record type: %d\n", type);
return -EINVAL;
}
/**
* wil_fw_load - load FW into device
*
* Load the FW and uCode code and data to the corresponding device
* memory regions
* wil_fw_process - process section from FW file
* if load is true: Load the FW and uCode code and data to the
* corresponding device memory regions,
* otherwise only parse and look for capabilities
*
* Return error code
*/
static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
static int wil_fw_process(struct wil6210_priv *wil, const void *data,
size_t size, bool load)
{
int rc = 0;
const struct wil_fw_record_head *hdr;
@ -437,7 +480,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
return -EINVAL;
}
rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
&hdr[1], hdr_sz);
&hdr[1], hdr_sz, load);
if (rc)
return rc;
}
@ -456,13 +499,16 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
}
/**
* wil_request_firmware - Request firmware and load to device
* wil_request_firmware - Request firmware
*
* Request firmware image from the file and load it to device
* Request firmware image from the file
* If load is true, load firmware to device, otherwise
* only parse and extract capabilities
*
* Return error code
*/
int wil_request_firmware(struct wil6210_priv *wil, const char *name)
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load)
{
int rc, rc1;
const struct firmware *fw;
@ -482,7 +528,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
rc = rc1;
goto out;
}
rc = wil_fw_load(wil, d, rc1);
rc = wil_fw_process(wil, d, rc1, load);
if (rc < 0)
goto out;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -101,7 +101,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
}
static void wil6210_mask_halp(struct wil6210_priv *wil)
void wil6210_mask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
@ -503,6 +503,13 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
offsetof(struct RGF_ICR, ICR));
u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
/* HALP interrupt can be unmasked when misc interrupts are
* masked
*/
if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
return 0;
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@ -592,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
void wil6210_set_halp(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
@ -600,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
void wil6210_clear_halp(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);

View File

@ -232,6 +232,9 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
if (unlikely(!ndev))
return;
might_sleep();
wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
reason_code, from_event ? "+" : "-");
@ -849,6 +852,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
bitmap_zero(wil->status, wil_status_last);
mutex_unlock(&wil->wmi_mutex);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@ -860,6 +864,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
mutex_unlock(&wil->p2p_wdev_mutex);
wil_mask_irq(wil);
@ -888,11 +893,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WIL_FW2_NAME);
wil_halt_cpu(wil);
memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
rc = wil_request_firmware(wil, WIL_FW_NAME);
rc = wil_request_firmware(wil, WIL_FW_NAME, true);
if (rc)
return rc;
rc = wil_request_firmware(wil, WIL_FW2_NAME);
rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
if (rc)
return rc;
@ -1035,10 +1041,10 @@ int wil_up(struct wil6210_priv *wil)
int __wil_down(struct wil6210_priv *wil)
{
int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
set_bit(wil_status_resetting, wil->status);
if (wil->platform_ops.bus_request)
wil->platform_ops.bus_request(wil->platform_handle, 0);
@ -1050,8 +1056,9 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
(void)wil_p2p_stop_discovery(wil);
wil_p2p_stop_radio_operations(wil);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@ -1063,18 +1070,7 @@ int __wil_down(struct wil6210_priv *wil)
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
if (test_bit(wil_status_fwconnected, wil->status) ||
test_bit(wil_status_fwconnecting, wil->status)) {
mutex_unlock(&wil->mutex);
rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
mutex_lock(&wil->mutex);
if (rc)
wil_err(wil, "timeout waiting for disconnect\n");
}
mutex_unlock(&wil->p2p_wdev_mutex);
wil_reset(wil, false);
@ -1118,23 +1114,26 @@ void wil_halp_vote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc)
if (!rc) {
wil_err(wil, "%s: HALP vote timed out\n", __func__);
else
wil_dbg_misc(wil,
"%s: HALP vote completed after %d ms\n",
__func__,
jiffies_to_msecs(to_jiffies - rc));
/* Mask HALP as done in case the interrupt is raised */
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,
"%s: HALP vote completed after %d ms\n",
__func__,
jiffies_to_msecs(to_jiffies - rc));
}
}
wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
@ -1145,16 +1144,16 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
}
wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}

View File

@ -179,13 +179,6 @@ void *wil_if_alloc(struct device *dev)
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
netif_tx_stop_all_queues(ndev);
return wil;
out_priv:
@ -216,25 +209,48 @@ void wil_if_free(struct wil6210_priv *wil)
int wil_if_add(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil_to_wdev(wil);
struct wiphy *wiphy = wdev->wiphy;
struct net_device *ndev = wil_to_ndev(wil);
int rc;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_dbg_misc(wil, "entered");
strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
rc = wiphy_register(wiphy);
if (rc < 0) {
wil_err(wil, "failed to register wiphy, err %d\n", rc);
return rc;
}
netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
netif_tx_stop_all_queues(ndev);
rc = register_netdev(ndev);
if (rc < 0) {
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
return rc;
goto out_wiphy;
}
return 0;
out_wiphy:
wiphy_unregister(wdev->wiphy);
return rc;
}
void wil_if_remove(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil_to_wdev(wil);
wil_dbg_misc(wil, "%s()\n", __func__);
unregister_netdev(ndev);
wiphy_unregister(wdev->wiphy);
}

View File

@ -263,3 +263,49 @@ void wil_p2p_search_expired(struct work_struct *work)
mutex_unlock(&wil->p2p_wdev_mutex);
}
}
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
{
struct wil_p2p_info *p2p = &wil->p2p;
struct cfg80211_scan_info info = {
.aborted = true,
};
lockdep_assert_held(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->radio_wdev != wil->p2p_wdev)
goto out;
if (!p2p->discovery_started) {
/* Regular scan on the p2p device */
if (wil->scan_request &&
wil->scan_request->wdev == wil->p2p_wdev) {
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
goto out;
}
/* Search or listen on p2p device */
mutex_unlock(&wil->p2p_wdev_mutex);
wil_p2p_stop_discovery(wil);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
/* search */
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
} else {
/* listen */
cfg80211_remain_on_channel_expired(wil->radio_wdev,
p2p->cookie,
&p2p->listen_chan,
GFP_KERNEL);
}
out:
wil->radio_wdev = wil->wdev;
mutex_unlock(&wil->p2p_wdev_mutex);
}
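
For illustration only (not part of this diff): the lockdep_assert_held() above documents that callers must enter wil_p2p_stop_radio_operations() with wil->mutex held while p2p_wdev_mutex is taken internally, matching the call sites elsewhere in this merge. A minimal sketch of that calling pattern, with a hypothetical helper name:

/* Hedged example caller honoring the locking contract. */
static void example_stop_p2p(struct wil6210_priv *wil)
{
	mutex_lock(&wil->mutex);
	wil_p2p_stop_radio_operations(wil);
	mutex_unlock(&wil->mutex);
}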

View File

@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include "wil6210.h"
#include <linux/rtnetlink.h>
static bool use_msi = true;
module_param(use_msi, bool, S_IRUGO);
@ -38,6 +39,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
switch (rev_id) {
case JTAG_DEV_ID_SPARROW_B0:
@ -51,6 +53,9 @@ void wil_set_capabilities(struct wil6210_priv *wil)
}
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, WIL_FW_NAME, false);
}
void wil_disable_irq(struct wil6210_priv *wil)
@ -293,6 +298,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
#endif /* CONFIG_PM */
wil6210_debugfs_remove(wil);
rtnl_lock();
wil_p2p_wdev_free(wil);
rtnl_unlock();
wil_if_remove(wil);
wil_if_pcie_disable(wil);
pci_iounmap(pdev, csr);
@ -300,7 +308,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
wil_p2p_wdev_free(wil);
wil_if_free(wil);
}
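
The wil_set_capabilities() hunk above calls the reworked helper with load == false, i.e. it only parses the firmware file to populate wil->fw_capabilities without touching the device, while the normal boot path presumably keeps passing true to actually download the image. A hedged sketch of the two call styles, using the new prototype from wil6210.h; the error handling shown is illustrative, not taken from the patch:

	/* probe time: peek at the image to learn what this FW can do */
	rc = wil_request_firmware(wil, WIL_FW_NAME, false);
	if (rc)
		wil_info(wil, "FW capabilities not extracted: %d\n", rc);

	/* boot/reset time: download the same image to the device */
	rc = wil_request_firmware(wil, WIL_FW_NAME, true);
	if (rc)
		return rc;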

View File

@ -873,9 +873,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
rc = -EINVAL;
goto out_free;
}
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
spin_lock_bh(&txdata->lock);
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
@ -950,9 +953,11 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
rc = -EINVAL;
goto out_free;
}
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
spin_lock_bh(&txdata->lock);
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
return 0;
out_free:
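
Taking txdata->lock before publishing vring->hwtail and txdata->enabled means any TX-side code that samples those fields under the same spinlock sees either a fully initialised, enabled vring or a still-disabled one, never a half-written state. A hypothetical consumer sketch (not from this patch), assuming the existing struct vring_tx_data fields:

	struct vring_tx_data *txdata = &wil->vring_tx_data[id];
	bool enabled;

	spin_lock_bh(&txdata->lock);
	enabled = txdata->enabled;	/* pairs with the writer side above */
	spin_unlock_bh(&txdata->lock);

	if (!enabled)
		return -EINVAL;	/* vring not ready for transmit yet */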

View File

@ -17,6 +17,7 @@
#ifndef __WIL6210_H__
#define __WIL6210_H__
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
@ -576,10 +577,11 @@ struct wil6210_priv {
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
u32 fw_version;
u8 fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
const char *hw_name;
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
@ -657,7 +659,7 @@ struct wil6210_priv {
/* P2P_DEVICE vif */
struct wireless_dev *p2p_wdev;
struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */
struct wireless_dev *radio_wdev;
/* High Access Latency Policy voting */
@ -828,6 +830,7 @@ void wil_unmask_irq(struct wil6210_priv *wil);
void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
void wil6210_mask_halp(struct wil6210_priv *wil);
/* P2P */
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
@ -840,6 +843,7 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
void wil_p2p_listen_expired(struct work_struct *work);
void wil_p2p_search_expired(struct work_struct *work);
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
/* WMI for P2P */
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
@ -893,7 +897,8 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name);
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);

View File

@ -312,14 +312,14 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
struct wireless_dev *wdev = wil->wdev;
struct wmi_ready_event *evt = d;
wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
wil->fw_version, le32_to_cpu(evt->sw_version),
evt->mac, wil->n_mids);
/* ignore MAC address, we already have it from the boot loader */
snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
"%d", wil->fw_version);
strlcpy(wdev->wiphy->fw_version, wil->fw_version,
sizeof(wdev->wiphy->fw_version));
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
@ -424,6 +424,7 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
void *d, int len)
{
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
struct cfg80211_scan_info info = {
@ -435,14 +436,13 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
wil->scan_request, info.aborted);
del_timer_sync(&wil->scan_timer);
mutex_lock(&wil->p2p_wdev_mutex);
cfg80211_scan_done(wil->scan_request, &info);
wil->radio_wdev = wil->wdev;
mutex_unlock(&wil->p2p_wdev_mutex);
wil->scan_request = NULL;
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
}
mutex_unlock(&wil->p2p_wdev_mutex);
}
static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)

View File

@ -46,6 +46,16 @@ enum wmi_mid {
MID_BROADCAST = 0xFF,
};
/* FW capability IDs
* Each ID maps to a bit in a 32-bit bitmask value provided by the FW to
* the host
*/
enum wmi_fw_capability {
WMI_FW_CAPABILITY_FTM = 0,
WMI_FW_CAPABILITY_PS_CONFIG = 1,
WMI_FW_CAPABILITY_MAX,
};
/* WMI_CMD_HDR */
struct wmi_cmd_hdr {
u8 mid;
@ -120,6 +130,8 @@ enum wmi_command_id {
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
@ -134,10 +146,15 @@ enum wmi_command_id {
WMI_BF_CTRL_CMDID = 0x862,
WMI_NOTIFY_REQ_CMDID = 0x863,
WMI_GET_STATUS_CMDID = 0x864,
WMI_GET_RF_STATUS_CMDID = 0x866,
WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
WMI_UNIT_TEST_CMDID = 0x900,
WMI_HICCUP_CMDID = 0x901,
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
WMI_PORT_ALLOCATE_CMDID = 0x911,
@ -150,6 +167,26 @@ enum wmi_command_id {
WMI_PCP_START_CMDID = 0x918,
WMI_PCP_STOP_CMDID = 0x919,
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
/* Power Save Configuration Commands */
WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
/* Not supported yet */
WMI_PS_DEV_CFG_CMDID = 0x91D,
/* Not supported yet */
WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
/* Per MAC Power Save Configuration commands
* Not supported yet
*/
WMI_PS_MID_CFG_CMDID = 0x91F,
/* Not supported yet */
WMI_PS_MID_CFG_READ_CMDID = 0x920,
WMI_RS_CFG_CMDID = 0x921,
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
WMI_TOF_SESSION_START_CMDID = 0x991,
WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
WMI_TOF_SET_LCR_CMDID = 0x993,
WMI_TOF_SET_LCI_CMDID = 0x994,
WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@ -291,9 +328,8 @@ enum wmi_scan_type {
/* WMI_START_SCAN_CMDID */
struct wmi_start_scan_cmd {
u8 direct_scan_mac_addr[WMI_MAC_LEN];
/* DMG Beacon frame is transmitted during active scanning */
/* run scan with discovery beacon. Relevant for ACTIVE scan only. */
u8 discovery_mode;
/* reserved */
u8 reserved;
/* Max duration in the home channel(ms) */
__le32 dwell_time;
@ -453,6 +489,12 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
/* WMI_TRAFFIC_DEFERRAL_CMDID */
struct wmi_traffic_deferral_cmd {
/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
u8 wakeup_trigger;
} __packed;
/* WMI_P2P_CFG_CMDID */
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
@ -818,85 +860,193 @@ struct wmi_pmc_cmd {
__le64 mem_base;
} __packed;
enum wmi_aoa_meas_type {
WMI_AOA_PHASE_MEAS = 0x00,
WMI_AOA_PHASE_AMP_MEAS = 0x01,
};
/* WMI_AOA_MEAS_CMDID */
struct wmi_aoa_meas_cmd {
u8 mac_addr[WMI_MAC_LEN];
/* channels IDs:
* 0 - 58320 MHz
* 1 - 60480 MHz
* 2 - 62640 MHz
*/
u8 channel;
/* enum wmi_aoa_meas_type */
u8 aoa_meas_type;
__le32 meas_rf_mask;
} __packed;
enum wmi_tof_burst_duration {
WMI_TOF_BURST_DURATION_250_USEC = 2,
WMI_TOF_BURST_DURATION_500_USEC = 3,
WMI_TOF_BURST_DURATION_1_MSEC = 4,
WMI_TOF_BURST_DURATION_2_MSEC = 5,
WMI_TOF_BURST_DURATION_4_MSEC = 6,
WMI_TOF_BURST_DURATION_8_MSEC = 7,
WMI_TOF_BURST_DURATION_16_MSEC = 8,
WMI_TOF_BURST_DURATION_32_MSEC = 9,
WMI_TOF_BURST_DURATION_64_MSEC = 10,
WMI_TOF_BURST_DURATION_128_MSEC = 11,
WMI_TOF_BURST_DURATION_NO_PREFERENCES = 15,
};
enum wmi_tof_session_start_flags {
WMI_TOF_SESSION_START_FLAG_SECURED = 0x1,
WMI_TOF_SESSION_START_FLAG_ASAP = 0x2,
WMI_TOF_SESSION_START_FLAG_LCI_REQ = 0x4,
WMI_TOF_SESSION_START_FLAG_LCR_REQ = 0x8,
};
/* WMI_TOF_SESSION_START_CMDID */
struct wmi_ftm_dest_info {
u8 channel;
/* wmi_tof_session_start_flags_e */
u8 flags;
u8 initial_token;
u8 num_of_ftm_per_burst;
u8 num_of_bursts_exp;
/* wmi_tof_burst_duration_e */
u8 burst_duration;
/* Burst Period indicates the interval between two consecutive burst
* instances, in units of 100 ms
*/
__le16 burst_period;
u8 dst_mac[WMI_MAC_LEN];
__le16 reserved;
} __packed;
/* WMI_TOF_SESSION_START_CMDID */
struct wmi_tof_session_start_cmd {
__le32 session_id;
u8 num_of_aoa_measures;
u8 aoa_type;
__le16 num_of_dest;
u8 reserved[4];
struct wmi_ftm_dest_info ftm_dest_info[0];
} __packed;
enum wmi_tof_channel_info_report_type {
WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1,
WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2,
WMI_TOF_CHANNEL_INFO_TYPE_SNR = 0x4,
WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA = 0x8,
WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC = 0x10,
};
/* WMI_TOF_CHANNEL_INFO_CMDID */
struct wmi_tof_channel_info_cmd {
/* wmi_tof_channel_info_report_type_e */
__le32 channel_info_report_request;
} __packed;
/* WMI Events
* List of Events (target to host)
*/
enum wmi_event_id {
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
WMI_FW_READY_EVENTID = 0x1801,
WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
WMI_ECHO_RSP_EVENTID = 0x1803,
WMI_FS_TUNE_DONE_EVENTID = 0x180A,
WMI_CORR_MEASURE_EVENTID = 0x180B,
WMI_READ_RSSI_EVENTID = 0x180C,
WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
WMI_DC_CALIB_DONE_EVENTID = 0x180F,
WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
WMI_READ_MAC_RXQ_EVENTID = 0x1830,
WMI_READ_MAC_TXQ_EVENTID = 0x1831,
WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
WMI_OTP_READ_RESULT_EVENTID = 0x1856,
WMI_LED_CFG_DONE_EVENTID = 0x1858,
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
WMI_FW_READY_EVENTID = 0x1801,
WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
WMI_ECHO_RSP_EVENTID = 0x1803,
WMI_FS_TUNE_DONE_EVENTID = 0x180A,
WMI_CORR_MEASURE_EVENTID = 0x180B,
WMI_READ_RSSI_EVENTID = 0x180C,
WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
WMI_DC_CALIB_DONE_EVENTID = 0x180F,
WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
WMI_READ_MAC_RXQ_EVENTID = 0x1830,
WMI_READ_MAC_TXQ_EVENTID = 0x1831,
WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
WMI_OTP_READ_RESULT_EVENTID = 0x1856,
WMI_LED_CFG_DONE_EVENTID = 0x1858,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
WMI_GET_RF_STATUS_EVENTID = 0x1866,
WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
/* Power management */
WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
WMI_PORT_ALLOCATED_EVENTID = 0x1911,
WMI_PORT_DELETED_EVENTID = 0x1912,
WMI_LISTEN_STARTED_EVENTID = 0x1914,
WMI_SEARCH_STARTED_EVENTID = 0x1915,
WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
WMI_PCP_STARTED_EVENTID = 0x1918,
WMI_PCP_STOPPED_EVENTID = 0x1919,
WMI_PCP_FACTOR_EVENTID = 0x191A,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
WMI_FW_VER_EVENTID = 0x9004,
WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
WMI_PORT_ALLOCATED_EVENTID = 0x1911,
WMI_PORT_DELETED_EVENTID = 0x1912,
WMI_LISTEN_STARTED_EVENTID = 0x1914,
WMI_SEARCH_STARTED_EVENTID = 0x1915,
WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
WMI_PCP_STARTED_EVENTID = 0x1918,
WMI_PCP_STOPPED_EVENTID = 0x1919,
WMI_PCP_FACTOR_EVENTID = 0x191A,
/* Power Save Configuration Events */
WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
/* Not supported yet */
WMI_PS_DEV_CFG_EVENTID = 0x191D,
/* Not supported yet */
WMI_PS_DEV_CFG_READ_EVENTID = 0x191E,
/* Not supported yet */
WMI_PS_MID_CFG_EVENTID = 0x191F,
/* Not supported yet */
WMI_PS_MID_CFG_READ_EVENTID = 0x1920,
WMI_RS_CFG_DONE_EVENTID = 0x1921,
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
WMI_TOF_SET_LCR_EVENTID = 0x1993,
WMI_TOF_SET_LCI_EVENTID = 0x1994,
WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
WMI_FW_VER_EVENTID = 0x9004,
WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
};
/* Events data structures */
@ -943,10 +1093,85 @@ struct wmi_get_status_done_event {
/* WMI_FW_VER_EVENTID */
struct wmi_fw_ver_event {
u8 major;
u8 minor;
__le16 subminor;
__le16 build;
/* FW image version */
__le32 fw_major;
__le32 fw_minor;
__le32 fw_subminor;
__le32 fw_build;
/* FW image build time stamp */
__le32 hour;
__le32 minute;
__le32 second;
__le32 day;
__le32 month;
__le32 year;
/* Boot Loader image version */
__le32 bl_major;
__le32 bl_minor;
__le32 bl_subminor;
__le32 bl_build;
/* The number of entries in the FW capabilities array */
u8 fw_capabilities_len;
u8 reserved[3];
/* FW capabilities info
* Must be the last member of the struct
*/
__le32 fw_capabilities[0];
} __packed;
/* WMI_GET_RF_STATUS_EVENTID */
enum rf_type {
RF_UNKNOWN = 0x00,
RF_MARLON = 0x01,
RF_SPARROW = 0x02,
};
/* WMI_GET_RF_STATUS_EVENTID */
enum board_file_rf_type {
BF_RF_MARLON = 0x00,
BF_RF_SPARROW = 0x01,
};
/* WMI_GET_RF_STATUS_EVENTID */
enum rf_status {
RF_OK = 0x00,
RF_NO_COMM = 0x01,
RF_WRONG_BOARD_FILE = 0x02,
};
/* WMI_GET_RF_STATUS_EVENTID */
struct wmi_get_rf_status_event {
/* enum rf_type */
__le32 rf_type;
/* attached RFs bit vector */
__le32 attached_rf_vector;
/* enabled RFs bit vector */
__le32 enabled_rf_vector;
/* enum rf_status, refers to enabled RFs */
u8 rf_status[32];
/* enum board file RF type */
__le32 board_file_rf_type;
/* board file platform type */
__le32 board_file_platform_type;
/* board file version */
__le32 board_file_version;
__le32 reserved[2];
} __packed;
/* WMI_GET_BASEBAND_TYPE_EVENTID */
enum baseband_type {
BASEBAND_UNKNOWN = 0x00,
BASEBAND_SPARROW_M_A0 = 0x03,
BASEBAND_SPARROW_M_A1 = 0x04,
BASEBAND_SPARROW_M_B0 = 0x05,
BASEBAND_SPARROW_M_C0 = 0x06,
BASEBAND_SPARROW_M_D0 = 0x07,
};
/* WMI_GET_BASEBAND_TYPE_EVENTID */
struct wmi_get_baseband_type_event {
/* enum baseband_type */
__le32 baseband_type;
} __packed;
/* WMI_MAC_ADDR_RESP_EVENTID */
@ -1410,4 +1635,553 @@ struct wmi_led_cfg_done_event {
__le32 status;
} __packed;
#define WMI_NUM_MCS (13)
/* Rate search parameters configuration per connection */
struct wmi_rs_cfg {
/* The maximal allowed PER for each MCS
* MCS will be considered as failed if PER during RS is higher
*/
u8 per_threshold[WMI_NUM_MCS];
/* Number of MPDUs for each MCS
* this is the minimal statistic required to make an educated
* decision
*/
u8 min_frame_cnt[WMI_NUM_MCS];
/* stop threshold [0-100] */
u8 stop_th;
/* MCS1 stop threshold [0-100] */
u8 mcs1_fail_th;
u8 max_back_failure_th;
/* Debug feature for disabling internal RS trigger (which is
* currently triggered by BF Done)
*/
u8 dbg_disable_internal_trigger;
__le32 back_failure_mask;
__le32 mcs_en_vec;
} __packed;
/* WMI_RS_CFG_CMDID */
struct wmi_rs_cfg_cmd {
/* connection id */
u8 cid;
/* enable or disable rate search */
u8 rs_enable;
/* rate search configuration */
struct wmi_rs_cfg rs_cfg;
} __packed;
/* WMI_RS_CFG_DONE_EVENTID */
struct wmi_rs_cfg_done_event {
u8 cid;
/* enum wmi_fw_status */
u8 status;
u8 reserved[2];
} __packed;
/* WMI_GET_DETAILED_RS_RES_CMDID */
struct wmi_get_detailed_rs_res_cmd {
/* connection id */
u8 cid;
u8 reserved[3];
} __packed;
/* RS results status */
enum wmi_rs_results_status {
WMI_RS_RES_VALID = 0x00,
WMI_RS_RES_INVALID = 0x01,
};
/* Rate search results */
struct wmi_rs_results {
/* number of sent MPDUs */
u8 num_of_tx_pkt[WMI_NUM_MCS];
/* number of non-acked MPDUs */
u8 num_of_non_acked_pkt[WMI_NUM_MCS];
/* RS timestamp */
__le32 tsf;
/* RS selected MCS */
u8 mcs;
} __packed;
/* WMI_GET_DETAILED_RS_RES_EVENTID */
struct wmi_get_detailed_rs_res_event {
u8 cid;
/* enum wmi_rs_results_status */
u8 status;
/* detailed rs results */
struct wmi_rs_results rs_results;
u8 reserved[3];
} __packed;
/* broadcast connection ID */
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
/* Types wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */
enum wmi_link_maintain_cfg_type {
/* AP/PCP default normal (non-FST) configuration settings */
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP = 0x00,
/* AP/PCP default FST configuration settings */
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP = 0x01,
/* STA default normal (non-FST) configuration settings */
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA = 0x02,
/* STA default FST configuration settings */
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA = 0x03,
/* custom configuration settings */
WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM = 0x04,
/* number of defined configuration types */
WMI_LINK_MAINTAIN_CFG_TYPES_NUM = 0x05,
};
/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */
enum wmi_link_maintain_cfg_response_status {
/* WMI_LINK_MAINTAIN_CFG_WRITE/READ command successfully accomplished
*/
WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK = 0x00,
/* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ
* command request
*/
WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT = 0x01,
};
/* Link Loss and Keep Alive configuration */
struct wmi_link_maintain_cfg {
/* link_loss_enable_detectors_vec */
__le32 link_loss_enable_detectors_vec;
/* detectors check period usec */
__le32 check_link_loss_period_usec;
/* max allowed tx ageing */
__le32 tx_ageing_threshold_usec;
/* keep alive period for high SNR */
__le32 keep_alive_period_usec_high_snr;
/* keep alive period for low SNR */
__le32 keep_alive_period_usec_low_snr;
/* lower snr limit for keep alive period update */
__le32 keep_alive_snr_threshold_low_db;
/* upper snr limit for keep alive period update */
__le32 keep_alive_snr_threshold_high_db;
/* num of successive bad bcons causing link-loss */
__le32 bad_beacons_num_threshold;
/* SNR limit for bad_beacons_detector */
__le32 bad_beacons_snr_threshold_db;
} __packed;
/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */
struct wmi_link_maintain_cfg_write_cmd {
/* enum wmi_link_maintain_cfg_type_e - type of requested default
* configuration to be applied
*/
__le32 cfg_type;
/* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
__le32 cid;
/* custom configuration settings to be applied (relevant only if
* cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM)
*/
struct wmi_link_maintain_cfg lm_cfg;
} __packed;
/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */
struct wmi_link_maintain_cfg_read_cmd {
/* connection ID which configuration settings are requested */
__le32 cid;
} __packed;
/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
struct wmi_link_maintain_cfg_write_done_event {
/* requested connection ID */
__le32 cid;
/* wmi_link_maintain_cfg_response_status_e - write status */
__le32 status;
} __packed;
/* \WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENT */
struct wmi_link_maintain_cfg_read_done_event {
/* requested connection ID */
__le32 cid;
/* wmi_link_maintain_cfg_response_status_e - read status */
__le32 status;
/* Retrieved configuration settings */
struct wmi_link_maintain_cfg lm_cfg;
} __packed;
enum wmi_traffic_deferral_status {
WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
};
/* WMI_TRAFFIC_DEFERRAL_EVENTID */
struct wmi_traffic_deferral_event {
/* enum wmi_traffic_deferral_status_e */
u8 status;
} __packed;
enum wmi_traffic_resume_status {
WMI_TRAFFIC_RESUME_SUCCESS = 0x0,
WMI_TRAFFIC_RESUME_FAILED = 0x1,
};
/* WMI_TRAFFIC_RESUME_EVENTID */
struct wmi_traffic_resume_event {
/* enum wmi_traffic_resume_status_e */
u8 status;
} __packed;
/* Power Save command completion status codes */
enum wmi_ps_cfg_cmd_status {
WMI_PS_CFG_CMD_STATUS_SUCCESS = 0x00,
WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01,
/* other error */
WMI_PS_CFG_CMD_STATUS_ERROR = 0x02,
};
/* Device Power Save Profiles */
enum wmi_ps_profile_type {
WMI_PS_PROFILE_TYPE_DEFAULT = 0x00,
WMI_PS_PROFILE_TYPE_PS_DISABLED = 0x01,
WMI_PS_PROFILE_TYPE_MAX_PS = 0x02,
WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03,
};
/* WMI_PS_DEV_PROFILE_CFG_CMDID
*
* Power save profile to be used by the device
*
* Returned event:
* - WMI_PS_DEV_PROFILE_CFG_EVENTID
*/
struct wmi_ps_dev_profile_cfg_cmd {
/* wmi_ps_profile_type_e */
u8 ps_profile;
u8 reserved[3];
} __packed;
/* WMI_PS_DEV_PROFILE_CFG_EVENTID */
struct wmi_ps_dev_profile_cfg_event {
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
} __packed;
enum wmi_ps_level {
WMI_PS_LEVEL_DEEP_SLEEP = 0x00,
WMI_PS_LEVEL_SHALLOW_SLEEP = 0x01,
/* awake = all PS mechanisms are disabled */
WMI_PS_LEVEL_AWAKE = 0x02,
};
enum wmi_ps_deep_sleep_clk_level {
/* 33k */
WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC = 0x00,
/* 10k */
WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC = 0x01,
/* @RTC Low latency */
WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT = 0x02,
WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL = 0x03,
WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK = 0x04,
/* Not Applicable */
WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A = 0xFF,
};
/* Response by the FW to a D3 entry request */
enum wmi_ps_d3_resp_policy {
WMI_PS_D3_RESP_POLICY_DEFAULT = 0x00,
/* debug -D3 req is always denied */
WMI_PS_D3_RESP_POLICY_DENIED = 0x01,
/* debug -D3 req is always approved */
WMI_PS_D3_RESP_POLICY_APPROVED = 0x02,
};
/* Device common power save configurations */
struct wmi_ps_dev_cfg {
/* lowest level of PS allowed while unassociated, enum wmi_ps_level_e
*/
u8 ps_unassoc_min_level;
/* lowest deep sleep clock level while nonassoc, enum
* wmi_ps_deep_sleep_clk_level_e
*/
u8 ps_unassoc_deep_sleep_min_level;
/* lowest level of PS allowed while associated, enum wmi_ps_level_e */
u8 ps_assoc_min_level;
/* lowest deep sleep clock level while assoc, enum
* wmi_ps_deep_sleep_clk_level_e
*/
u8 ps_assoc_deep_sleep_min_level;
/* enum wmi_ps_deep_sleep_clk_level_e */
u8 ps_assoc_low_latency_ds_min_level;
/* enum wmi_ps_d3_resp_policy_e */
u8 ps_D3_response_policy;
/* BOOL */
u8 ps_D3_pm_pme_enabled;
/* BOOL */
u8 ps_halp_enable;
u8 ps_deep_sleep_enter_thresh_msec;
/* BOOL */
u8 ps_voltage_scaling_en;
} __packed;
/* WMI_PS_DEV_CFG_CMDID
*
* Configure common Power Save parameters of the device and all MIDs.
*
* Returned event:
* - WMI_PS_DEV_CFG_EVENTID
*/
struct wmi_ps_dev_cfg_cmd {
/* Device Power Save configuration to be applied */
struct wmi_ps_dev_cfg ps_dev_cfg;
/* alignment to 32b */
u8 reserved[2];
} __packed;
/* WMI_PS_DEV_CFG_EVENTID */
struct wmi_ps_dev_cfg_event {
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
} __packed;
/* WMI_PS_DEV_CFG_READ_CMDID
*
* request to retrieve device Power Save configuration
* (WMI_PS_DEV_CFG_CMD params)
*
* Returned event:
* - WMI_PS_DEV_CFG_READ_EVENTID
*/
struct wmi_ps_dev_cfg_read_cmd {
__le32 reserved;
} __packed;
/* WMI_PS_DEV_CFG_READ_EVENTID */
struct wmi_ps_dev_cfg_read_event {
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
/* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD
* params)
*/
struct wmi_ps_dev_cfg dev_ps_cfg;
/* alignment to 32b */
u8 reserved[2];
} __packed;
/* Per Mac Power Save configurations */
struct wmi_ps_mid_cfg {
/* Low power RX in BTI is enabled, BOOL */
u8 beacon_lprx_enable;
/* Sync to sector ID enabled, BOOL */
u8 beacon_sync_to_sectorId_enable;
/* Low power RX in DTI is enabled, BOOL */
u8 frame_exchange_lprx_enable;
/* Sleep Cycle while in scheduled PS, 1-31 */
u8 scheduled_sleep_cycle_pow2;
/* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */
u8 scheduled_num_of_awake_bis;
u8 am_to_traffic_load_thresh_mbp;
u8 traffic_to_am_load_thresh_mbps;
u8 traffic_to_am_num_of_no_traffic_bis;
/* BOOL */
u8 continuous_traffic_psm;
__le16 no_traffic_to_min_usec;
__le16 no_traffic_to_max_usec;
__le16 snoozing_sleep_interval_milisec;
u8 max_no_data_awake_events;
/* Trigger WEB after k failed beacons */
u8 num_of_failed_beacons_rx_to_trigger_web;
/* Trigger BF after k failed beacons */
u8 num_of_failed_beacons_rx_to_trigger_bf;
/* Trigger SOB after k successful beacons */
u8 num_of_successful_beacons_rx_to_trigger_sob;
} __packed;
/* WMI_PS_MID_CFG_CMDID
*
* Configure Power Save parameters of a specific MID.
* These parameters are relevant for the specific BSS this MID belongs to.
*
* Returned event:
* - WMI_PS_MID_CFG_EVENTID
*/
struct wmi_ps_mid_cfg_cmd {
/* MAC ID */
u8 mid;
/* mid PS configuration to be applied */
struct wmi_ps_mid_cfg ps_mid_cfg;
} __packed;
/* WMI_PS_MID_CFG_EVENTID */
struct wmi_ps_mid_cfg_event {
/* MAC ID */
u8 mid;
/* alignment to 32b */
u8 reserved[3];
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
} __packed;
/* WMI_PS_MID_CFG_READ_CMDID
*
* request to retrieve Power Save configuration of mid
* (WMI_PS_MID_CFG_CMD params)
*
* Returned event:
* - WMI_PS_MID_CFG_READ_EVENTID
*/
struct wmi_ps_mid_cfg_read_cmd {
/* MAC ID */
u8 mid;
/* alignment to 32b */
u8 reserved[3];
} __packed;
/* WMI_PS_MID_CFG_READ_EVENTID */
struct wmi_ps_mid_cfg_read_event {
/* MAC ID */
u8 mid;
/* Retrieved MID Power Save configuration(WMI_PS_MID_CFG_CMD params) */
struct wmi_ps_mid_cfg mid_ps_cfg;
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
} __packed;
#define WMI_AOA_MAX_DATA_SIZE (128)
enum wmi_aoa_meas_status {
WMI_AOA_MEAS_SUCCESS = 0x00,
WMI_AOA_MEAS_PEER_INCAPABLE = 0x01,
WMI_AOA_MEAS_FAILURE = 0x02,
};
/* WMI_AOA_MEAS_EVENTID */
struct wmi_aoa_meas_event {
u8 mac_addr[WMI_MAC_LEN];
/* channels IDs:
* 0 - 58320 MHz
* 1 - 60480 MHz
* 2 - 62640 MHz
*/
u8 channel;
/* enum wmi_aoa_meas_type */
u8 aoa_meas_type;
/* Measurements are from RFs, defined by the mask */
__le32 meas_rf_mask;
/* enum wmi_aoa_meas_status */
u8 meas_status;
u8 reserved;
/* Length of meas_data in bytes */
__le16 length;
u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
} __packed;
/* WMI_TOF_GET_CAPABILITIES_EVENTID */
struct wmi_tof_get_capabilities_event {
u8 ftm_capability;
/* maximum supported number of destination to start TOF */
u8 max_num_of_dest;
/* maximum supported number of measurements per burst */
u8 max_num_of_meas_per_burst;
u8 reserved;
/* maximum supported multi bursts */
__le16 max_multi_bursts_sessions;
/* maximum supported FTM burst duration, wmi_tof_burst_duration_e */
__le16 max_ftm_burst_duration;
/* AOA supported types */
__le32 aoa_supported_types;
} __packed;
enum wmi_tof_session_end_status {
WMI_TOF_SESSION_END_NO_ERROR = 0x00,
WMI_TOF_SESSION_END_FAIL = 0x01,
WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02,
WMI_TOF_SESSION_END_ABORTED = 0x03,
};
/* WMI_TOF_SESSION_END_EVENTID */
struct wmi_tof_session_end_event {
/* FTM session ID */
__le32 session_id;
/* wmi_tof_session_end_status_e */
u8 status;
u8 reserved[3];
} __packed;
/* Responder FTM Results */
struct wmi_responder_ftm_res {
u8 t1[6];
u8 t2[6];
u8 t3[6];
u8 t4[6];
__le16 tod_err;
__le16 toa_err;
__le16 tod_err_initiator;
__le16 toa_err_initiator;
} __packed;
enum wmi_tof_ftm_per_dest_res_status {
WMI_PER_DEST_RES_NO_ERROR = 0x00,
WMI_PER_DEST_RES_TX_RX_FAIL = 0x01,
WMI_PER_DEST_RES_PARAM_DONT_MATCH = 0x02,
};
enum wmi_tof_ftm_per_dest_res_flags {
WMI_PER_DEST_RES_REQ_START = 0x01,
WMI_PER_DEST_RES_BURST_REPORT_END = 0x02,
WMI_PER_DEST_RES_REQ_END = 0x04,
WMI_PER_DEST_RES_PARAM_UPDATE = 0x08,
};
/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */
struct wmi_tof_ftm_per_dest_res_event {
/* FTM session ID */
__le32 session_id;
/* destination MAC address */
u8 dst_mac[WMI_MAC_LEN];
/* wmi_tof_ftm_per_dest_res_flags_e */
u8 flags;
/* wmi_tof_ftm_per_dest_res_status_e */
u8 status;
/* responder ASAP */
u8 responder_asap;
/* responder number of FTM per burst */
u8 responder_num_ftm_per_burst;
/* responder number of FTM burst exponent */
u8 responder_num_ftm_bursts_exp;
/* responder burst duration ,wmi_tof_burst_duration_e */
u8 responder_burst_duration;
/* responder burst period; indicates the interval between two consecutive
* burst instances, in units of 100 ms
*/
__le16 responder_burst_period;
/* receive burst counter */
__le16 bursts_cnt;
/* tsf of responder start burst */
__le32 tsf_sync;
/* actual received ftm per burst */
u8 actual_ftm_per_burst;
u8 reserved0[7];
struct wmi_responder_ftm_res responder_ftm_res[0];
} __packed;
enum wmi_tof_channel_info_type {
WMI_TOF_CHANNEL_INFO_AOA = 0x00,
WMI_TOF_CHANNEL_INFO_LCI = 0x01,
WMI_TOF_CHANNEL_INFO_LCR = 0x02,
WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC = 0x03,
WMI_TOF_CHANNEL_INFO_CIR = 0x04,
WMI_TOF_CHANNEL_INFO_RSSI = 0x05,
WMI_TOF_CHANNEL_INFO_SNR = 0x06,
WMI_TOF_CHANNEL_INFO_DEBUG = 0x07,
};
/* WMI_TOF_CHANNEL_INFO_EVENTID */
struct wmi_tof_channel_info_event {
/* FTM session ID */
__le32 session_id;
/* destination MAC address */
u8 dst_mac[WMI_MAC_LEN];
/* wmi_tof_channel_info_type_e */
u8 type;
/* data report length */
u8 len;
/* data report payload */
u8 report[0];
} __packed;
#endif /* __WILOCITY_WMI_H__ */
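
Since the WMI_FW_CAPABILITY_* IDs end up in the fw_capabilities bitmap declared in struct wil6210_priv (DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX)), gating a host feature on firmware support reduces to a test_bit() check once the file has been parsed. A minimal sketch with a hypothetical caller:

	if (test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
		wil_dbg_misc(wil, "FW advertises FTM support\n");
	else
		wil_dbg_misc(wil, "FTM not available in this FW\n");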

View File

@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
do {
/* Check if AMSDU can accommodate this MSDU */
if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
adapter->tx_buf_size)
break;
skb_src = skb_dequeue(&pra_list->skb_head);