Merge ath-next from ath.git.

Major changes in ath10k:

* enable VHT for IBSS
* initial work to support qca99x0 and the corresponding 10.4 firmware branch
Kalle Valo 2015-07-21 11:36:56 +03:00
commit c538bb3b80
27 changed files with 3391 additions and 239 deletions

View File

@ -11,7 +11,8 @@ ath10k_core-y += mac.o \
wmi-tlv.o \
bmi.o \
hw.o \
p2p.o
p2p.o \
swap.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o

View File

@ -178,7 +178,7 @@ struct bmi_target_info {
};
/* in msec */
#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1

View File

@ -452,6 +452,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
struct ath10k *ar = ce_state->ar;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;

View File

@ -21,7 +21,7 @@
#include "hif.h"
/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 8
#define CE_COUNT_MAX 12
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
@ -38,8 +38,13 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
#define CE_DESC_FLAGS_META_DATA_LSB 2
/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3)
#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb
struct ce_desc {
__le32 addr;
@ -423,8 +428,10 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
ar->regs->ce_wrap_intr_sum_host_msi_lsb
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
ar->regs->ce_wrap_intr_sum_host_msi_mask
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
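
For illustration, a minimal user-space sketch (not driver code) of how the per-chip ce_desc_meta_data_mask/lsb pair introduced above is applied to a CE descriptor flags word, in the spirit of the driver's MS() macro. The 0xFFFC/2 and 0xFFF0/4 pairs are the qca988x and qca99x0 values added to hw.c later in this commit; this lookup through ar->hw_values is also why the ce.c hunk above gains a local "struct ath10k *ar".

/* Illustrative sketch, not driver code: extract the CE descriptor
 * meta data field using the per-chip mask/lsb values.
 */
#include <stdio.h>
#include <stdint.h>

struct hw_values {
	uint16_t ce_desc_meta_data_mask;
	uint8_t ce_desc_meta_data_lsb;
};

static const struct hw_values qca988x_values = { 0xFFFC, 2 };
static const struct hw_values qca99x0_values = { 0xFFF0, 4 };

/* Same shape as the driver's MS() macro: mask, then shift down by lsb. */
static uint16_t ce_desc_meta_data(const struct hw_values *hw, uint16_t flags)
{
	return (flags & hw->ce_desc_meta_data_mask) >> hw->ce_desc_meta_data_lsb;
}

int main(void)
{
	uint16_t flags = 0x1234;	/* made-up descriptor flags word */

	printf("qca988x meta data 0x%x, qca99x0 meta data 0x%x\n",
	       ce_desc_meta_data(&qca988x_values, flags),
	       ce_desc_meta_data(&qca99x0_values, flags));
	return 0;
}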

View File

@ -49,6 +49,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
.has_shifted_cc_wraparound = true,
.otp_exe_param = 0,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
.fw = QCA988X_HW_2_0_FW_FILE,
@ -63,6 +64,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
.otp_exe_param = 0,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
.fw = QCA6174_HW_2_1_FW_FILE,
@ -77,6 +79,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
.otp_exe_param = 0,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
.fw = QCA6174_HW_3_0_FW_FILE,
@ -91,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
.otp_exe_param = 0,
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
@ -101,8 +105,68 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
.name = "qca99x0 hw2.0",
.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
.fw = {
.dir = QCA99X0_HW_2_0_FW_DIR,
.fw = QCA99X0_HW_2_0_FW_FILE,
.otp = QCA99X0_HW_2_0_OTP_FILE,
.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
},
};
static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
[ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
[ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
[ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
[ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
[ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
[ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
[ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
[ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
size_t buf_len,
enum ath10k_fw_features feat)
{
if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
WARN_ON(!ath10k_core_fw_feature_str[feat])) {
return scnprintf(buf, buf_len, "bit%d", feat);
}
return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
}
void ath10k_core_get_fw_features_str(struct ath10k *ar,
char *buf,
size_t buf_len)
{
unsigned int len = 0;
int i;
for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
if (test_bit(i, ar->fw_features)) {
if (len > 0)
len += scnprintf(buf + len, buf_len - len, ",");
len += ath10k_core_get_fw_feature_str(buf + len,
buf_len - len,
i);
}
}
}
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
@ -355,6 +419,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
int ret;
ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
@ -380,7 +445,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return ret;
}
ret = ath10k_bmi_execute(ar, address, 0, &result);
ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp (%d)\n", ret);
return ret;
@ -412,6 +477,13 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
data = ar->firmware_data;
data_len = ar->firmware_len;
mode_name = "normal";
ret = ath10k_swap_code_seg_configure(ar,
ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
if (ret) {
ath10k_err(ar, "failed to configure fw code swap: %d\n",
ret);
return ret;
}
break;
case ATH10K_FIRMWARE_MODE_UTF:
data = ar->testmode.utf->data;
@ -451,6 +523,8 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
ath10k_swap_code_seg_release(ar);
ar->board = NULL;
ar->board_data = NULL;
ar->board_len = 0;
@ -464,6 +538,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
ar->firmware_len = 0;
ar->cal_file = NULL;
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
@ -737,6 +812,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
ar->htt.op_version);
break;
case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw code swap image ie (%zd B)\n",
ie_len);
ar->swap.firmware_codeswap_data = data;
ar->swap.firmware_codeswap_len = ie_len;
break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
@ -1014,6 +1096,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
@ -1023,6 +1106,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
@ -1033,6 +1117,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->max_num_peers = TARGET_10_4_NUM_PEERS;
ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@ -1056,6 +1151,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);
@ -1330,6 +1426,13 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
ret = ath10k_swap_code_seg_init(ar);
if (ret) {
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
ret);
goto err_free_firmware_files;
}
mutex_lock(&ar->conf_mutex);
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@ -1470,9 +1573,15 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
switch (hw_rev) {
case ATH10K_HW_QCA988X:
ar->regs = &qca988x_regs;
ar->hw_values = &qca988x_values;
break;
case ATH10K_HW_QCA6174:
ar->regs = &qca6174_regs;
ar->hw_values = &qca6174_values;
break;
case ATH10K_HW_QCA99X0:
ar->regs = &qca99x0_regs;
ar->hw_values = &qca99x0_values;
break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",

View File

@ -36,6 +36,7 @@
#include "spectral.h"
#include "thermal.h"
#include "wow.h"
#include "swap.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@ -327,8 +328,8 @@ struct ath10k_vif {
u32 uapsd;
} sta;
struct {
/* 127 stations; wmi limit */
u8 tim_bitmap[16];
/* 512 stations */
u8 tim_bitmap[64];
u8 tim_len;
u32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
@ -545,6 +546,7 @@ struct ath10k {
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
u32 max_spatial_stream;
/* protected by conf_mutex */
bool ani_enabled;
@ -560,6 +562,7 @@ struct ath10k {
struct completion target_suspend;
const struct ath10k_hw_regs *regs;
const struct ath10k_hw_values *hw_values;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
@ -570,6 +573,7 @@ struct ath10k {
const char *name;
u32 patch_load_addr;
int uart_pin;
u32 otp_exe_param;
/* This is true if given HW chip has a quirky Cycle Counter
* wraparound which resets to 0x7fffffff instead of 0. All
@ -578,6 +582,12 @@ struct ath10k {
*/
bool has_shifted_cc_wraparound;
/* Some chips expect the fragment descriptor to be in contiguous
* memory for any TX operation. Set the continuous_frag_desc flag
* for hardware that has this requirement.
*/
bool continuous_frag_desc;
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
@ -602,6 +612,12 @@ struct ath10k {
const struct firmware *cal_file;
struct {
const void *firmware_codeswap_data;
size_t firmware_codeswap_len;
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
} swap;
char spec_board_id[100];
bool spec_board_loaded;
@ -617,6 +633,7 @@ struct ath10k {
bool is_roc;
int vdev_id;
int roc_freq;
bool roc_notify;
} scan;
struct {
@ -675,6 +692,8 @@ struct ath10k {
int max_num_stations;
int max_num_vdevs;
int max_num_tdls_vdevs;
int num_active_peers;
int num_tids;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@ -749,6 +768,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_hw_rev hw_rev,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
void ath10k_core_get_fw_features_str(struct ath10k *ar,
char *buf,
size_t max_len);
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
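
For illustration, a user-space sketch (not driver code) of why the tim_bitmap in struct ath10k_vif grows from 16 to 64 bytes above: one bit per association ID, so 64 * 8 = 512 stations can be flagged in the TIM element. The helpers below are the usual bitmap bookkeeping and are illustrative only.

/* Illustrative sketch, not driver code: one TIM bit per AID. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TIM_BITMAP_LEN	64			/* bytes, as in the new struct */
#define MAX_AIDS	(TIM_BITMAP_LEN * 8)	/* 512 stations */

static void tim_set_aid(uint8_t *tim_bitmap, unsigned int aid)
{
	assert(aid < MAX_AIDS);
	tim_bitmap[aid / 8] |= 1u << (aid % 8);
}

static void tim_clear_aid(uint8_t *tim_bitmap, unsigned int aid)
{
	assert(aid < MAX_AIDS);
	tim_bitmap[aid / 8] &= ~(1u << (aid % 8));
}

int main(void)
{
	uint8_t tim_bitmap[TIM_BITMAP_LEN] = { 0 };

	tim_set_aid(tim_bitmap, 1);
	tim_set_aid(tim_bitmap, 511);	/* highest AID a 64-byte map can hold */
	tim_clear_aid(tim_bitmap, 1);

	printf("last octet 0x%02x\n", tim_bitmap[63]);	/* 0x80 */
	return 0;
}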

View File

@ -124,7 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
char fw_features[128];
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
@ -137,8 +141,10 @@ void ath10k_print_driver_info(struct ath10k *ar)
ar->htt.target_version_major,
ar->htt.target_version_minor,
ar->wmi.op_version,
ar->htt.op_version,
ath10k_cal_mode_str(ar->cal_mode),
ar->max_num_stations);
ar->max_num_stations,
fw_features);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),

View File

@ -102,6 +102,43 @@ static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
};
static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
[HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
[HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
[HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
[HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
[HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
[HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
[HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
[HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
[HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
[HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
[HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
[HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
[HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
[HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
[HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
[HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
[HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
[HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
[HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
[HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
[HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
[HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
[HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
[HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
};
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
@ -147,6 +184,10 @@ int ath10k_htt_init(struct ath10k *ar)
2; /* ip4 dscp or ip6 priority */
switch (ar->htt.op_version) {
case ATH10K_FW_HTT_OP_VERSION_10_4:
ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
break;
case ATH10K_FW_HTT_OP_VERSION_10_1:
ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
@ -208,5 +249,9 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
if (status)
return status;
status = ath10k_htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}
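
For illustration, a trimmed user-space sketch (not driver code) of how the per-firmware T2H tables above are used: the raw message type reported by the firmware indexes into the table installed for the running HTT op version, and anything at or beyond the table size is rejected. The enums here are shortened stand-ins for the real ones in htt.h.

/* Illustrative sketch, not driver code: translate a firmware-specific
 * T2H message type to the generic enum via a lookup table.
 */
#include <stdio.h>

enum generic_t2h {
	T2H_VERSION_CONF,
	T2H_RX_IND,
	T2H_RX_FLUSH,
	T2H_NUM_MSGS
};

enum hw_10_4_t2h {
	HW_10_4_VERSION_CONF = 0x0,
	HW_10_4_RX_IND = 0x1,
	HW_10_4_RX_FLUSH = 0x2,
	HW_10_4_NUM_MSGS
};

static const enum generic_t2h hw_10_4_map[] = {
	[HW_10_4_VERSION_CONF] = T2H_VERSION_CONF,
	[HW_10_4_RX_IND] = T2H_RX_IND,
	[HW_10_4_RX_FLUSH] = T2H_RX_FLUSH,
};

/* Returns -1 for message types the installed table does not cover. */
static int t2h_translate(unsigned int raw_type)
{
	if (raw_type >= HW_10_4_NUM_MSGS)
		return -1;
	return hw_10_4_map[raw_type];
}

int main(void)
{
	printf("raw 0x1 -> generic %d\n", t2h_translate(0x1));
	printf("raw 0x30 -> generic %d\n", t2h_translate(0x30));
	return 0;
}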

View File

@ -87,6 +87,11 @@ struct htt_data_tx_desc_frag {
__le32 len;
} __packed;
struct htt_msdu_ext_desc {
__le32 tso_flag[4];
struct htt_data_tx_desc_frag frags[6];
};
enum htt_data_tx_desc_flags0 {
HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
@ -349,6 +354,38 @@ enum htt_tlv_t2h_msg_type {
HTT_TLV_T2H_NUM_MSGS
};
enum htt_10_4_t2h_msg_type {
HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10,
HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11,
HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
HTT_10_4_T2H_MSG_TYPE_TEST = 0x13,
HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17,
HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
/* 0x19 to 0x2f are reserved */
HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30,
/* keep this last */
HTT_10_4_T2H_NUM_MSGS
};
enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF,
HTT_T2H_MSG_TYPE_RX_IND,
@ -375,6 +412,10 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_AGGR_CONF,
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST,
HTT_T2H_MSG_TYPE_EN_STATS,
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
/* keep this last */
HTT_T2H_NUM_MSGS
};
@ -1430,6 +1471,11 @@ struct ath10k_htt {
/* rx_status template */
struct ieee80211_rx_status rx_status;
struct {
dma_addr_t paddr;
struct htt_msdu_ext_desc *vaddr;
} frag_desc;
};
#define RX_HTT_HDR_STATUS_LEN 64
@ -1497,6 +1543,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,

View File

@ -1201,7 +1201,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
{
struct htt_rx_desc *rxd;
enum rx_msdu_decap_format decap;
struct ieee80211_hdr *hdr;
/* First msdu's decapped header:
* [802.11 header] <-- padded to 4 bytes long
@ -1215,7 +1214,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
*/
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
@ -2074,6 +2072,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
case HTT_T2H_MSG_TYPE_EN_STATS:
case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);

View File

@ -84,6 +84,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int ret, size;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
@ -94,11 +95,31 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
idr_destroy(&htt->pending_tx);
return -ENOMEM;
ret = -ENOMEM;
goto free_idr_pending_tx;
}
if (!ar->hw_params.continuous_frag_desc)
goto skip_frag_desc_alloc;
size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
&htt->frag_desc.paddr,
GFP_DMA);
if (!htt->frag_desc.vaddr) {
ath10k_warn(ar, "failed to alloc fragment desc memory\n");
ret = -ENOMEM;
goto free_tx_pool;
}
skip_frag_desc_alloc:
return 0;
free_tx_pool:
dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
idr_destroy(&htt->pending_tx);
return ret;
}
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
@ -121,9 +142,18 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
int size;
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
dma_pool_destroy(htt->tx_pool);
if (htt->frag_desc.vaddr) {
size = htt->max_num_pending_tx *
sizeof(struct htt_msdu_ext_desc);
dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
htt->frag_desc.paddr);
}
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@ -201,6 +231,48 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
return 0;
}
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
int ret, size;
if (!ar->hw_params.continuous_frag_desc)
return 0;
if (!htt->frag_desc.paddr) {
ath10k_warn(ar, "invalid frag desc memory\n");
return -EINVAL;
}
size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
skb = ath10k_htc_alloc_skb(ar, size);
if (!skb)
return -ENOMEM;
skb_put(skb, size);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
cmd->frag_desc_bank_cfg.info = 0;
cmd->frag_desc_bank_cfg.num_banks = 1;
cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
__cpu_to_le32(htt->frag_desc.paddr);
cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
__cpu_to_le16(htt->max_num_pending_tx - 1);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
ret);
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;

View File

@ -34,8 +34,15 @@ const struct ath10k_hw_regs qca988x_regs = {
.ce7_base_address = 0x00059000,
.soc_reset_control_si0_rst_mask = 0x00000001,
.soc_reset_control_ce_rst_mask = 0x00040000,
.soc_chip_id_address = 0x00ec,
.scratch_3_address = 0x0030,
.soc_chip_id_address = 0x000000ec,
.scratch_3_address = 0x00000030,
.fw_indicator_address = 0x00009030,
.pcie_local_base_address = 0x00080000,
.ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
.ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
.pcie_intr_fw_mask = 0x00000400,
.pcie_intr_ce_mask_all = 0x0007f800,
.pcie_intr_clr_address = 0x00000014,
};
const struct ath10k_hw_regs qca6174_regs = {
@ -54,8 +61,79 @@ const struct ath10k_hw_regs qca6174_regs = {
.ce7_base_address = 0x00036000,
.soc_reset_control_si0_rst_mask = 0x00000000,
.soc_reset_control_ce_rst_mask = 0x00000001,
.soc_chip_id_address = 0x000f0,
.scratch_3_address = 0x0028,
.soc_chip_id_address = 0x000000f0,
.scratch_3_address = 0x00000028,
.fw_indicator_address = 0x0003a028,
.pcie_local_base_address = 0x00080000,
.ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
.ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
.pcie_intr_fw_mask = 0x00000400,
.pcie_intr_ce_mask_all = 0x0007f800,
.pcie_intr_clr_address = 0x00000014,
};
const struct ath10k_hw_regs qca99x0_regs = {
.rtc_state_cold_reset_mask = 0x00000400,
.rtc_soc_base_address = 0x00080000,
.rtc_wmac_base_address = 0x00000000,
.soc_core_base_address = 0x00082000,
.ce_wrapper_base_address = 0x0004d000,
.ce0_base_address = 0x0004a000,
.ce1_base_address = 0x0004a400,
.ce2_base_address = 0x0004a800,
.ce3_base_address = 0x0004ac00,
.ce4_base_address = 0x0004b000,
.ce5_base_address = 0x0004b400,
.ce6_base_address = 0x0004b800,
.ce7_base_address = 0x0004bc00,
/* Note: qca99x0 supports up to 12 Copy Engines. Other than the
* addresses of CE0 and CE1, no other copy engine is directly referred
* to in the code. It is not really necessary to assign addresses for
* the newly supported CEs in this address table.
* Copy Engine Address
* CE8 0x0004c000
* CE9 0x0004c400
* CE10 0x0004c800
* CE11 0x0004cc00
*/
.soc_reset_control_si0_rst_mask = 0x00000001,
.soc_reset_control_ce_rst_mask = 0x00000100,
.soc_chip_id_address = 0x000000ec,
.scratch_3_address = 0x00040050,
.fw_indicator_address = 0x00040050,
.pcie_local_base_address = 0x00000000,
.ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
.ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
.pcie_intr_fw_mask = 0x00100000,
.pcie_intr_ce_mask_all = 0x000fff00,
.pcie_intr_clr_address = 0x00000010,
};
const struct ath10k_hw_values qca988x_values = {
.rtc_state_val_on = 3,
.ce_count = 8,
.msi_assign_ce_max = 7,
.num_target_ce_config_wlan = 7,
.ce_desc_meta_data_mask = 0xFFFC,
.ce_desc_meta_data_lsb = 2,
};
const struct ath10k_hw_values qca6174_values = {
.rtc_state_val_on = 3,
.ce_count = 8,
.msi_assign_ce_max = 7,
.num_target_ce_config_wlan = 7,
.ce_desc_meta_data_mask = 0xFFFC,
.ce_desc_meta_data_lsb = 2,
};
const struct ath10k_hw_values qca99x0_values = {
.rtc_state_val_on = 5,
.ce_count = 12,
.msi_assign_ce_max = 12,
.num_target_ce_config_wlan = 10,
.ce_desc_meta_data_mask = 0xFFF0,
.ce_desc_meta_data_lsb = 4,
};
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,

View File

@ -72,6 +72,18 @@ enum qca6174_chip_id_rev {
#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
/* QCA99X0 1.0 definitions (unsupported) */
#define QCA99X0_HW_1_0_CHIP_ID_REV 0x0
/* QCA99X0 2.0 definitions */
#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
#define QCA99X0_HW_2_0_FW_FILE "firmware.bin"
#define QCA99X0_HW_2_0_OTP_FILE "otp.bin"
#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@ -112,6 +124,9 @@ enum ath10k_fw_ie_type {
* FW API 5 and above.
*/
ATH10K_FW_IE_HTT_OP_VERSION = 6,
/* Code swap image for firmware binary */
ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
};
enum ath10k_fw_wmi_op_version {
@ -122,6 +137,7 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
ATH10K_FW_WMI_OP_VERSION_TLV = 4,
ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
/* keep last */
ATH10K_FW_WMI_OP_VERSION_MAX,
@ -137,6 +153,8 @@ enum ath10k_fw_htt_op_version {
ATH10K_FW_HTT_OP_VERSION_TLV = 3,
ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
/* keep last */
ATH10K_FW_HTT_OP_VERSION_MAX,
};
@ -144,6 +162,7 @@ enum ath10k_fw_htt_op_version {
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
ATH10K_HW_QCA99X0,
};
struct ath10k_hw_regs {
@ -164,16 +183,38 @@ struct ath10k_hw_regs {
u32 soc_reset_control_ce_rst_mask;
u32 soc_chip_id_address;
u32 scratch_3_address;
u32 fw_indicator_address;
u32 pcie_local_base_address;
u32 ce_wrap_intr_sum_host_msi_lsb;
u32 ce_wrap_intr_sum_host_msi_mask;
u32 pcie_intr_fw_mask;
u32 pcie_intr_ce_mask_all;
u32 pcie_intr_clr_address;
};
extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
extern const struct ath10k_hw_regs qca99x0_regs;
struct ath10k_hw_values {
u32 rtc_state_val_on;
u8 ce_count;
u8 msi_assign_ce_max;
u8 num_target_ce_config_wlan;
u16 ce_desc_meta_data_mask;
u8 ce_desc_meta_data_lsb;
};
extern const struct ath10k_hw_values qca988x_values;
extern const struct ath10k_hw_values qca6174_values;
extern const struct ath10k_hw_values qca99x0_values;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
@ -310,8 +351,73 @@ enum ath10k_hw_rate_cck {
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
/* Target specific defines for 10.4 firmware */
#define TARGET_10_4_NUM_VDEVS 16
#define TARGET_10_4_NUM_STATIONS 32
#define TARGET_10_4_NUM_PEERS ((TARGET_10_4_NUM_STATIONS) + \
(TARGET_10_4_NUM_VDEVS))
#define TARGET_10_4_ACTIVE_PEERS 0
/* TODO: increase qcache max client limit to 512 after
* testing with 512 clients.
*/
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 256
#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
#define TARGET_10_4_NUM_PEER_KEYS 2
#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
#define TARGET_10_4_AST_SKID_LIMIT 32
#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \
BIT(2) | BIT(3))
#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \
BIT(2) | BIT(3))
/* 100 ms for video, best-effort, and background */
#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100
/* 40 ms for voice */
#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40
#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_10_4_SCAN_MAX_REQS 4
#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3
#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8
/* Note: mcast to ucast is disabled by default */
#define TARGET_10_4_NUM_MCAST_GROUPS 0
#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0
#define TARGET_10_4_MCAST2UCAST_MODE 0
#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
#define TARGET_10_4_NUM_WDS_ENTRIES 32
#define TARGET_10_4_DMA_BURST_SIZE 1
#define TARGET_10_4_MAC_AGGR_DELIM 0
#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10_4_VOW_CONFIG 0
#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10_4_11AC_TX_MAX_FRAGS 2
#define TARGET_10_4_MAX_PEER_EXT_STATS 16
#define TARGET_10_4_SMART_ANT_CAP 0
#define TARGET_10_4_BK_MIN_FREE 0
#define TARGET_10_4_BE_MIN_FREE 0
#define TARGET_10_4_VI_MIN_FREE 0
#define TARGET_10_4_VO_MIN_FREE 0
#define TARGET_10_4_RX_BATCH_MODE 1
#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0
#define TARGET_10_4_ATF_CONFIG 0
#define TARGET_10_4_IPHDR_PAD_CONFIG 1
#define TARGET_10_4_QWRAP_CONFIG 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
#define CE_COUNT ar->hw_values->ce_count
/*
* Total number of PCIe MSI interrupts requested for all interrupt sources.
@ -335,10 +441,10 @@ enum ath10k_hw_rate_cck {
/* MSIs for Copy Engines */
#define MSI_ASSIGN_CE_INITIAL 1
#define MSI_ASSIGN_CE_MAX 7
#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max
/* as of IP3.7.1 */
#define RTC_STATE_V_ON 3
#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on
#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0
@ -374,7 +480,7 @@ enum ath10k_hw_rate_cck {
#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
@ -448,7 +554,7 @@ enum ath10k_hw_rate_cck {
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
@ -456,16 +562,18 @@ enum ath10k_hw_rate_cck {
#define CCNT_TO_MSEC(x) ((x) / 88000)
/* Firmware indications to the Host via SCRATCH_3 register. */
#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
/* HOST_REG interrupt from firmware */
#define PCIE_INTR_FIRMWARE_MASK 0x00000400
#define PCIE_INTR_CE_MASK_ALL 0x0007f800
#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all
#define DRAM_BASE_ADDRESS 0x00400000
#define PCIE_BAR_REG_ADDRESS 0x40030
#define MISSING 0
#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
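
For reference, the arithmetic implied by the 10.4 target defines above, spelled out in a small compile-checked sketch; the numbers are taken directly from this hunk.

/* Illustrative sketch: derived 10.4 target sizing. */
#include <stdio.h>

#define TARGET_10_4_NUM_VDEVS		16
#define TARGET_10_4_NUM_STATIONS	32
#define TARGET_10_4_NUM_PEERS		(TARGET_10_4_NUM_STATIONS + \
					 TARGET_10_4_NUM_VDEVS)
#define TARGET_10_4_TGT_NUM_TIDS	(TARGET_10_4_NUM_PEERS * 2)
#define TARGET_10_4_NUM_MSDU_DESC	(1024 + 400)

_Static_assert(TARGET_10_4_NUM_PEERS == 48, "peers = stations + vdevs");
_Static_assert(TARGET_10_4_TGT_NUM_TIDS == 96, "tids = peers * 2");

int main(void)
{
	printf("peers %d, tids %d, msdu descriptors %d\n",
	       TARGET_10_4_NUM_PEERS, TARGET_10_4_TGT_NUM_TIDS,
	       TARGET_10_4_NUM_MSDU_DESC);
	return 0;
}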

View File

@ -1668,7 +1668,7 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
return 0;
}
static int ath10k_mac_ps_vif_count(struct ath10k *ar)
static int ath10k_mac_num_vifs_started(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int num = 0;
@ -1676,7 +1676,7 @@ static int ath10k_mac_ps_vif_count(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list)
if (arvif->ps)
if (arvif->is_started)
num++;
return num;
@ -1700,7 +1700,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
enable_ps = arvif->ps;
if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
!test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
ar->fw_features)) {
ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
@ -3034,38 +3034,16 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->htt.tx_lock);
switch (pause_id) {
case WMI_TLV_TX_PAUSE_ID_MCC:
case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
case WMI_TLV_TX_PAUSE_ID_AP_PS:
case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
switch (action) {
case WMI_TLV_TX_PAUSE_ACTION_STOP:
ath10k_mac_vif_tx_lock(arvif, pause_id);
break;
case WMI_TLV_TX_PAUSE_ACTION_WAKE:
ath10k_mac_vif_tx_unlock(arvif, pause_id);
break;
default:
ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
action, arvif->vdev_id);
break;
}
switch (action) {
case WMI_TLV_TX_PAUSE_ACTION_STOP:
ath10k_mac_vif_tx_lock(arvif, pause_id);
break;
case WMI_TLV_TX_PAUSE_ACTION_WAKE:
ath10k_mac_vif_tx_unlock(arvif, pause_id);
break;
case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
case WMI_TLV_TX_PAUSE_ID_HOST:
default:
/* FIXME: Some pause_ids aren't vdev specific. Instead they
* target peer_id and tid. Implementing these could improve
* traffic scheduling fairness across multiple connected
* stations in AP/IBSS modes.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac ignoring unsupported tx pause vdev %i id %d\n",
arvif->vdev_id, pause_id);
ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
action, arvif->vdev_id);
break;
}
}
@ -3082,12 +3060,15 @@ static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_mac_tx_pause *arg = data;
if (arvif->vdev_id != arg->vdev_id)
return;
ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
}
void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
enum wmi_tlv_tx_pause_id pause_id,
enum wmi_tlv_tx_pause_action action)
void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
enum wmi_tlv_tx_pause_id pause_id,
enum wmi_tlv_tx_pause_action action)
{
struct ath10k_mac_tx_pause arg = {
.vdev_id = vdev_id,
@ -3449,14 +3430,13 @@ void __ath10k_scan_finish(struct ath10k *ar)
case ATH10K_SCAN_IDLE:
break;
case ATH10K_SCAN_RUNNING:
if (ar->scan.is_roc)
ieee80211_remain_on_channel_expired(ar->hw);
/* fall through */
case ATH10K_SCAN_ABORTING:
if (!ar->scan.is_roc)
ieee80211_scan_completed(ar->hw,
(ar->scan.state ==
ATH10K_SCAN_ABORTING));
else if (ar->scan.roc_notify)
ieee80211_remain_on_channel_expired(ar->hw);
/* fall through */
case ATH10K_SCAN_STARTING:
ar->scan.state = ATH10K_SCAN_IDLE;
@ -4641,9 +4621,6 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH10K_SCAN_ID;
if (!req->no_cck)
arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
if (req->ie_len) {
arg.ie_len = req->ie_len;
memcpy(arg.ie, req->ie, arg.ie_len);
@ -5462,6 +5439,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ar->scan.is_roc = true;
ar->scan.vdev_id = arvif->vdev_id;
ar->scan.roc_freq = chan->center_freq;
ar->scan.roc_notify = true;
ret = 0;
break;
case ATH10K_SCAN_STARTING:
@ -5525,7 +5503,13 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
ar->scan.roc_notify = false;
spin_unlock_bh(&ar->data_lock);
ath10k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
@ -5566,7 +5550,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct ath10k *ar = hw->priv;
bool skip;
int ret;
long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not;
* we'll collect those frames either way if we stop/delete vdevs */
@ -5578,7 +5562,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (ar->state == ATH10K_STATE_WEDGED)
goto skip;
ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
@ -5592,9 +5576,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
(empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
skip, ar->state, ret);
if (time_left == 0 || skip)
ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
skip, ar->state, time_left);
skip:
mutex_unlock(&ar->conf_mutex);
@ -6219,6 +6203,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = true;
ret = ath10k_mac_vif_setup_ps(arvif);
if (ret) {
ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
arvif->vdev_id, ret);
goto err_stop;
}
if (vif->type == NL80211_IFTYPE_MONITOR) {
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
if (ret) {
@ -6236,6 +6227,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
err_stop:
ath10k_vdev_stop(arvif);
arvif->is_started = false;
ath10k_mac_vif_setup_ps(arvif);
err:
mutex_unlock(&ar->conf_mutex);
@ -6565,8 +6557,11 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
{
.max = 2,
.types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 2,
.types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
},
@ -6576,6 +6571,26 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
},
};
static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
{
.max = 2,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 2,
.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
},
};
static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
{
.max = 1,
@ -6594,7 +6609,7 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
{
.limits = ath10k_tlv_if_limit,
.num_different_channels = 1,
.max_interfaces = 3,
.max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
},
{
@ -6608,10 +6623,16 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
{
.limits = ath10k_tlv_if_limit,
.num_different_channels = 2,
.max_interfaces = 3,
.num_different_channels = 1,
.max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
},
{
.limits = ath10k_tlv_qcs_if_limit,
.num_different_channels = 2,
.max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
},
{
.limits = ath10k_tlv_if_limit_ibss,
.num_different_channels = 1,
@ -6620,6 +6641,33 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
},
};
static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 16,
.types = BIT(NL80211_IFTYPE_AP)
},
};
static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
{
.limits = ath10k_10_4_if_limits,
.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
.max_interfaces = 16,
.num_different_channels = 1,
.beacon_int_infra_match = true,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
#endif
},
};
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
@ -6902,6 +6950,8 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@ -6941,6 +6991,11 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10x_if_comb);
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10_4_if_comb);
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);

View File

@ -61,9 +61,9 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
enum wmi_tlv_tx_pause_id pause_id,
enum wmi_tlv_tx_pause_action action);
void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
enum wmi_tlv_tx_pause_id pause_id,
enum wmi_tlv_tx_pause_action action);
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate);

View File

@ -59,6 +59,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define QCA988X_2_0_DEVICE_ID (0x003c)
#define QCA6174_2_1_DEVICE_ID (0x003e)
#define QCA99X0_2_0_DEVICE_ID (0x0040)
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
@ -81,7 +82,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
@ -90,6 +91,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@ -155,6 +157,38 @@ static const struct ce_attr host_ce_config_wlan[] = {
.src_sz_max = DIAG_TRANSFER_LIMIT,
.dest_nentries = 2,
},
/* CE8: target->host pktlog */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 128,
},
/* CE9 target autonomous qcache memcpy */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE10: target autonomous hif memcpy */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE11: target autonomous hif memcpy */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
};
/* Target firmware's Copy Engine configuration. */
@ -232,6 +266,38 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(0),
.nbytes_max = __cpu_to_le32(0),
.flags = __cpu_to_le32(0),
.reserved = __cpu_to_le32(0),
},
/* CE8 target->host pktlog */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE9 target autonomous qcache memcpy */
{
.pipenum = __cpu_to_le32(9),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* It is not necessary to send the target wlan configuration for CE10
* and CE11 as these CEs are not actively used in the target.
*/
};
/*
@ -479,6 +545,12 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
offset, offset + sizeof(value), ar_pci->mem_len);
return;
}
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@ -496,6 +568,12 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
u32 val;
int ret;
if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
offset, offset + sizeof(val), ar_pci->mem_len);
return 0;
}
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@ -678,6 +756,26 @@ static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
ath10k_pci_rx_post(ar);
}
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0;
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA6174:
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS) &
0x7ff) << 21;
break;
case ATH10K_HW_QCA99X0:
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
break;
}
val |= 0x100000 | (addr & 0xfffff);
return val;
}
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, when applicable, and single user
@ -740,8 +838,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* convert it from Target CPU virtual address space
* to CE address space
*/
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
address);
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
@ -899,7 +996,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* to
* CE address space
*/
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
remaining_bytes = orig_nbytes;
ce_data = ce_data_base;
@ -1331,20 +1428,42 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA6174:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS);
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
/* TODO: Find appropriate register configuration for QCA99X0
* to mask irq/MSI.
*/
break;
}
}
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
u32 val;
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
val |= CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA6174:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS);
val |= CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
/* TODO: Find appropriate register configuration for QCA99X0
* to unmask irq/MSI.
*/
break;
}
}
static void ath10k_pci_irq_disable(struct ath10k *ar)
@ -1506,7 +1625,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* masked. To prevent the device from asserting the interrupt reset it
* before proceeding with cleanup.
*/
ath10k_pci_warm_reset(ar);
ath10k_pci_safe_chip_reset(ar);
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
@ -1687,6 +1806,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
case QCA99X0_2_0_DEVICE_ID:
return 1;
case QCA6174_2_1_DEVICE_ID:
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
@ -1757,7 +1877,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
target_ce_config_wlan,
sizeof(target_ce_config_wlan));
sizeof(struct ce_pipe_config) *
NUM_TARGET_CE_CONFIG_WLAN);
if (ret != 0) {
ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@ -1871,7 +1992,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
}
/* Last CE is Diagnostic Window */
if (i == CE_COUNT - 1) {
if (i == CE_DIAG_PIPE) {
ar_pci->ce_diag = pipe->ce_hdl;
continue;
}
@ -2016,6 +2137,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
return ath10k_pci_warm_reset(ar);
} else if (QCA_REV_99X0(ar)) {
ath10k_pci_irq_disable(ar);
return ath10k_pci_qca99x0_chip_reset(ar);
} else {
return -ENOTSUPP;
}
}
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
int i, ret;
@ -2122,12 +2255,38 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
return 0;
}
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
ret = ath10k_pci_cold_reset(ar);
if (ret) {
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
return ret;
}
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
ret);
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
return 0;
}
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
if (QCA_REV_988X(ar))
return ath10k_pci_qca988x_chip_reset(ar);
else if (QCA_REV_6174(ar))
return ath10k_pci_qca6174_chip_reset(ar);
else if (QCA_REV_99X0(ar))
return ath10k_pci_qca99x0_chip_reset(ar);
else
return -ENOTSUPP;
}
@ -2679,6 +2838,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
pci_set_master(pdev);
/* Arrange for access to Target SoC registers. */
ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
@ -2745,6 +2905,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
break;
default:
WARN_ON(1);
return -ENOTSUPP;
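
For illustration, a user-space sketch (not driver code) of the translation done by ath10k_pci_targ_cpu_to_ce_addr() above, with the register reads replaced by a plain parameter so the bit arithmetic is visible. region_base stands for either (CORE_CTRL & 0x7ff) << 21 on QCA988X/QCA6174 or the PCIE_BAR_REG value on QCA99X0; the input values are made up.

/* Illustrative sketch, not driver code: target CPU virtual address to
 * CE address space conversion.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t targ_cpu_to_ce_addr(uint32_t region_base, uint32_t addr)
{
	/* Keep the low 20 bits of the target CPU address, set bit 20,
	 * and place the result inside the window given by region_base.
	 */
	return region_base | 0x100000 | (addr & 0xfffff);
}

int main(void)
{
	uint32_t core_ctrl = 0x00000234;	/* made-up register value */
	uint32_t region_base = (core_ctrl & 0x7ff) << 21;

	printf("0x%08x\n", targ_cpu_to_ce_addr(region_base, 0x00412345));
	return 0;
}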

View File

@ -162,6 +162,7 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
size_t mem_len;
/*
* Number of MSI interrupts granted, 0 --> using legacy PCI line
@ -236,18 +237,6 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4
/*
* TODO: Should be a function call specific to each Target-type.
* This convoluted macro converts from Target CPU Virtual Address Space to CE
* Address Space. As part of this process, we conservatively fetch the current
* PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
(((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10

View File

@ -0,0 +1,208 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file implements the code swap logic. With the code swap
* feature, the target can run the fw binary with an even smaller IRAM
* size by using host memory to store some of the code segments.
*/
#include "core.h"
#include "bmi.h"
#include "debug.h"
static int ath10k_swap_code_seg_fill(struct ath10k *ar,
struct ath10k_swap_code_seg_info *seg_info,
const void *data, size_t data_len)
{
u8 *virt_addr = seg_info->virt_address[0];
u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
const u8 *fw_data = data;
union ath10k_swap_code_seg_item *swap_item;
u32 length = 0;
u32 payload_len;
u32 total_payload_len = 0;
u32 size_left = data_len;
/* Parse the swap bin and copy its content to host-allocated memory.
* The format is a sequence of address, length and value; the last
* 4 bytes are the target write address. The address field is
* currently not used.
*/
seg_info->target_addr = -1;
while (size_left >= sizeof(*swap_item)) {
swap_item = (union ath10k_swap_code_seg_item *)fw_data;
payload_len = __le32_to_cpu(swap_item->tlv.length);
if ((payload_len > size_left) ||
(payload_len == 0 &&
size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
payload_len);
return -EINVAL;
}
if (payload_len == 0) {
if (memcmp(swap_item->tail.magic_signature, swap_magic,
ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
ath10k_err(ar, "refusing an invalid swap file\n");
return -EINVAL;
}
seg_info->target_addr =
__le32_to_cpu(swap_item->tail.bmi_write_addr);
break;
}
memcpy(virt_addr, swap_item->tlv.data, payload_len);
virt_addr += payload_len;
length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
size_left -= length;
fw_data += length;
total_payload_len += payload_len;
}
if (seg_info->target_addr == -1) {
ath10k_err(ar, "failed to parse invalid swap file\n");
return -EINVAL;
}
seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
return 0;
}
static void
ath10k_swap_code_seg_free(struct ath10k *ar,
struct ath10k_swap_code_seg_info *seg_info)
{
u32 seg_size;
if (!seg_info)
return;
if (!seg_info->virt_address[0])
return;
seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
seg_info->paddr[0]);
}
static struct ath10k_swap_code_seg_info *
ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
{
struct ath10k_swap_code_seg_info *seg_info;
void *virt_addr;
dma_addr_t paddr;
swap_bin_len = roundup(swap_bin_len, 2);
if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
return NULL;
}
seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
if (!seg_info)
return NULL;
virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
GFP_KERNEL);
if (!virt_addr) {
ath10k_err(ar, "failed to allocate dma coherent memory\n");
return NULL;
}
seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
seg_info->seg_hw_info.num_segs =
__cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
seg_info->virt_address[0] = virt_addr;
seg_info->paddr[0] = paddr;
return seg_info;
}
int ath10k_swap_code_seg_configure(struct ath10k *ar,
enum ath10k_swap_code_seg_bin_type type)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
switch (type) {
case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
if (!ar->swap.firmware_swap_code_seg_info)
return 0;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
seg_info = ar->swap.firmware_swap_code_seg_info;
break;
default:
case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
type);
return 0;
}
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
sizeof(seg_info->seg_hw_info));
if (ret) {
ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
ret);
return ret;
}
return 0;
}
void ath10k_swap_code_seg_release(struct ath10k *ar)
{
ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
ar->swap.firmware_codeswap_data = NULL;
ar->swap.firmware_codeswap_len = 0;
ar->swap.firmware_swap_code_seg_info = NULL;
}
int ath10k_swap_code_seg_init(struct ath10k *ar)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
return 0;
seg_info = ath10k_swap_code_seg_alloc(ar,
ar->swap.firmware_codeswap_len);
if (!seg_info) {
ath10k_err(ar, "failed to allocate fw code swap segment\n");
return -ENOMEM;
}
ret = ath10k_swap_code_seg_fill(ar, seg_info,
ar->swap.firmware_codeswap_data,
ar->swap.firmware_codeswap_len);
if (ret) {
ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
ret);
ath10k_swap_code_seg_free(ar, seg_info);
return ret;
}
ar->swap.firmware_swap_code_seg_info = seg_info;
return 0;
}
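
For reference, here is a minimal userspace-style sketch of the code-swap binary layout that ath10k_swap_code_seg_fill() walks: a sequence of { address, length, data[length] } items terminated by a zero-length item whose payload is a 12-byte all-zero magic followed by the 32-bit BMI write address. This is illustrative only, not code from the patch; the struct and function names are hypothetical and a packed little-endian layout is assumed.

#include <stdint.h>
#include <string.h>

#define SWAP_MAGIC_SZ 12

struct swap_tlv_hdr {            /* hypothetical mirror of the TLV header */
	uint32_t address;        /* currently unused by the driver */
	uint32_t length;         /* payload bytes that follow */
};

struct swap_tail {               /* hypothetical mirror of the tail item */
	uint8_t magic[SWAP_MAGIC_SZ];
	uint32_t bmi_write_addr;
};

/* Return the BMI write address on success, 0 on a malformed blob. */
static uint32_t swap_blob_target_addr(const uint8_t *buf, size_t len)
{
	static const uint8_t zero_magic[SWAP_MAGIC_SZ];

	while (len >= sizeof(struct swap_tlv_hdr)) {
		struct swap_tlv_hdr hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		if (hdr.length == 0) {
			struct swap_tail tail;

			/* the tail must be exactly the remaining bytes */
			if (len != sizeof(tail))
				return 0;
			memcpy(&tail, buf, sizeof(tail));
			if (memcmp(tail.magic, zero_magic, SWAP_MAGIC_SZ))
				return 0;
			return tail.bmi_write_addr;
		}
		if (hdr.length > len - sizeof(hdr))
			return 0;
		/* a real parser would copy hdr.length payload bytes here */
		buf += sizeof(hdr) + hdr.length;
		len -= sizeof(hdr) + hdr.length;
	}
	return 0;	/* no tail item found */
}

The address returned from the tail is where ath10k_swap_code_seg_configure() later writes the seg_hw_info header over BMI.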

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _SWAP_H_
#define _SWAP_H_
#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024)
#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12
#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16
/* Currently only one swap segment is supported */
#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
u8 data[0];
} __packed;
struct ath10k_swap_code_seg_tail {
u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
__le32 bmi_write_addr;
} __packed;
union ath10k_swap_code_seg_item {
struct ath10k_swap_code_seg_tlv tlv;
struct ath10k_swap_code_seg_tail tail;
} __packed;
enum ath10k_swap_code_seg_bin_type {
ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
};
struct ath10k_swap_code_seg_hw_info {
/* Swap binary image size */
__le32 swap_size;
__le32 num_segs;
/* Swap data size */
__le32 size;
__le32 size_log2;
__le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
__le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
} __packed;
struct ath10k_swap_code_seg_info {
struct ath10k_swap_code_seg_hw_info seg_hw_info;
void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
u32 target_addr;
dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
};
int ath10k_swap_code_seg_configure(struct ath10k *ar,
enum ath10k_swap_code_seg_bin_type type);
void ath10k_swap_code_seg_release(struct ath10k *ar);
int ath10k_swap_code_seg_init(struct ath10k *ar);
#endif

View File

@ -450,4 +450,7 @@ Fw Mode/SubMode Mask
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
#define QCA99X0_BOARD_DATA_SZ 12288
#define QCA99X0_BOARD_EXT_DATA_SZ 0
#endif /* __TARGADDRS_H__ */

View File

@ -147,9 +147,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
const u8 *addr, bool expect_mapped)
{
int ret;
long time_left;
ret = wait_event_timeout(ar->peer_mapping_wq, ({
time_left = wait_event_timeout(ar->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ar->data_lock);
@ -160,7 +160,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
}), 3*HZ);
if (ret <= 0)
if (time_left == 0)
return -ETIMEDOUT;
return 0;
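
The hunk above switches from an int ret to a long time_left because wait_event_timeout() returns the remaining jiffies as a long, and 0 only when the condition is still false after the timeout expires, so testing for 0 is the idiomatic timeout check. A minimal kernel-style sketch of that idiom follows; the waitqueue and flag are hypothetical, not from the driver.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* hypothetical */
static bool example_done;			/* set elsewhere, e.g. by an event handler */

static int example_wait(void)
{
	long time_left;

	time_left = wait_event_timeout(example_wq, example_done, 3 * HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}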

View File

@ -377,12 +377,34 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
"wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
pause_id, action, vdev_map, peer_id, tid_map);
for (vdev_id = 0; vdev_map; vdev_id++) {
if (!(vdev_map & BIT(vdev_id)))
continue;
switch (pause_id) {
case WMI_TLV_TX_PAUSE_ID_MCC:
case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
case WMI_TLV_TX_PAUSE_ID_AP_PS:
case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
for (vdev_id = 0; vdev_map; vdev_id++) {
if (!(vdev_map & BIT(vdev_id)))
continue;
vdev_map &= ~BIT(vdev_id);
ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
vdev_map &= ~BIT(vdev_id);
ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
action);
}
break;
case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
case WMI_TLV_TX_PAUSE_ID_HOST:
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac ignoring unsupported tx pause id %d\n",
pause_id);
break;
default:
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac ignoring unknown tx pause vdev %d\n",
pause_id);
break;
}
kfree(tb);
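
The per-pause-id cases above walk the vdev bitmap by testing and clearing one bit per iteration and stopping as soon as the map is empty. A self-contained sketch of that loop idiom, where BIT() and the callback are stand-ins rather than driver code:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

static void handle_vdev(unsigned int vdev_id)
{
	printf("handling vdev %u\n", vdev_id);
}

static void for_each_set_vdev(uint32_t vdev_map)
{
	unsigned int vdev_id;

	for (vdev_id = 0; vdev_map; vdev_id++) {
		if (!(vdev_map & BIT(vdev_id)))
			continue;

		vdev_map &= ~BIT(vdev_id);
		handle_vdev(vdev_id);
	}
}

int main(void)
{
	for_each_set_vdev(BIT(0) | BIT(3) | BIT(7));	/* vdevs 0, 3 and 7 */
	return 0;
}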
@ -709,6 +731,8 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_swba_parse *swba = data;
struct wmi_tim_info_arg *tim_info_arg;
const struct wmi_tim_info *tim_info_ev = ptr;
if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
return -EPROTO;
@ -716,7 +740,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
return -ENOBUFS;
swba->arg->tim_info[swba->n_tim++] = ptr;
if (__le32_to_cpu(tim_info_ev->tim_len) >
sizeof(tim_info_ev->tim_bitmap)) {
ath10k_warn(ar, "refusing to parse invalid swba structure\n");
return -EPROTO;
}
tim_info_arg = &swba->arg->tim_info[swba->n_tim];
tim_info_arg->tim_len = tim_info_ev->tim_len;
tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
tim_info_arg->tim_changed = tim_info_ev->tim_changed;
tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
swba->n_tim++;
return 0;
}
@ -3151,6 +3189,38 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
.nan_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@ -3204,6 +3274,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@ -3262,6 +3374,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_TLV_VDEV_PARAM_UNSUPPORTED,
.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -273,7 +273,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
tmp_false_count++;
}
}
if (ps.count < min_count)
if (ps.count <= min_count)
/* did not reach minimum count, drop sequence */
continue;

View File

@ -736,6 +736,92 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
return rc;
}
/* internal functions for device reset and starting AP */
static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
size_t probe_ies_len, const u8 *probe_ies,
size_t assoc_ies_len, const u8 *assoc_ies)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
/* The FW does not form a regular beacon, so beacon IEs are not set.
 * Once the DMG beacon is supported, the beacon IEs will be reused;
 * add something like:
 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
 *            bcon->beacon_ies);
 */
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies);
if (rc) {
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
return rc;
}
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies);
if (rc) {
wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
return rc;
}
return 0;
}
static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *ssid, size_t ssid_len, u32 privacy,
int bi, u8 chan,
size_t probe_ies_len, const u8 *probe_ies,
size_t assoc_ies_len, const u8 *assoc_ies,
u8 hidden_ssid)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
__wil_down(wil);
rc = __wil_up(wil);
if (rc)
goto out;
rc = wmi_set_ssid(wil, ssid_len, ssid);
if (rc)
goto out;
rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies,
assoc_ies_len, assoc_ies);
if (rc)
goto out;
wil->privacy = privacy;
wil->channel = chan;
wil->hidden_ssid = hidden_ssid;
netif_carrier_on(ndev);
rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
if (rc)
goto err_pcp_start;
rc = wil_bcast_init(wil);
if (rc)
goto err_bcast;
goto out; /* success */
err_bcast:
wmi_pcp_stop(wil);
err_pcp_start:
netif_carrier_off(ndev);
out:
mutex_unlock(&wil->mutex);
return rc;
}
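
_wil_cfg80211_start_ap() unwinds a partial bring-up with the usual goto ladder: err_bcast stops the PCP it just started, err_pcp_start drops the carrier, and out releases the mutex, so each failure label undoes only what succeeded before it. A stand-alone sketch of that unwind pattern, with hypothetical step/undo helpers rather than the driver's calls:

#include <stdio.h>

static int step_a(void)  { return 0; }
static void undo_a(void) { puts("undo a"); }
static int step_b(void)  { return -1; }	/* pretend this step fails */

static int start_sequence(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;

	rc = step_b();
	if (rc)
		goto err_a;	/* b failed: roll back a only */

	return 0;		/* success */

err_a:
	undo_a();
out:
	return rc;
}

int main(void)
{
	return start_sequence() ? 1 : 0;
}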
static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_beacon_data *bcon)
@ -746,6 +832,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
int rc;
u32 privacy = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
@ -760,40 +847,41 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
}
/* FW do not form regular beacon, so bcon IE's are not set
* For the DMG bcon, when it will be supported, bcon IE's will
* be reused; add something like:
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
if (rc) {
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
return rc;
if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len))
privacy = 1;
/* in case privacy has changed, need to restart the AP */
if (wil->privacy != privacy) {
struct wireless_dev *wdev = ndev->ieee80211_ptr;
wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
wil->privacy, privacy);
rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
wdev->ssid_len, privacy,
wdev->beacon_interval,
wil->channel, pr_ies_len, pr_ies,
bcon->assocresp_ies_len,
bcon->assocresp_ies,
wil->hidden_ssid);
} else {
rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies,
bcon->assocresp_ies_len,
bcon->assocresp_ies);
}
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
bcon->assocresp_ies_len,
bcon->assocresp_ies);
if (rc) {
wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
return rc;
}
return 0;
return rc;
}
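
The privacy check above treats the presence of an RSN element (element ID 48) in the probe-response IEs as a secure configuration, relying on cfg80211_find_ie() to locate it. A self-contained sketch of such an id/length walk over an IE buffer; the buffer contents below are made up for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EID_RSN 48

/* Return a pointer to the element with the given id, or NULL. */
static const uint8_t *find_ie(uint8_t eid, const uint8_t *ies, size_t len)
{
	while (len >= 2) {
		uint8_t id = ies[0];
		uint8_t elen = ies[1];

		if (len < (size_t)elen + 2)
			return NULL;	/* truncated element */
		if (id == eid)
			return ies;

		ies += elen + 2;
		len -= elen + 2;
	}
	return NULL;
}

int main(void)
{
	/* SSID element ("ap") followed by a dummy 2-byte RSN element */
	const uint8_t ies[] = { 0, 2, 'a', 'p', EID_RSN, 2, 0x01, 0x00 };

	printf("privacy: %d\n", find_ie(EID_RSN, ies, sizeof(ies)) != NULL);
	return 0;
}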
static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_ap_settings *info)
{
int rc = 0;
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
@ -807,6 +895,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
break;
case NL80211_HIDDEN_SSID_ZERO_LEN:
hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
break;
case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
break;
default:
wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
return -EOPNOTSUPP;
}
wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
channel->center_freq, info->privacy ? "secure" : "open");
wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
@ -830,70 +935,14 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
}
wil_set_recovery_state(wil, fw_recovery_idle);
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
pr_ies_len, pr_ies,
bcon->assocresp_ies_len,
bcon->assocresp_ies,
hidden_ssid);
mutex_lock(&wil->mutex);
__wil_down(wil);
rc = __wil_up(wil);
if (rc)
goto out;
rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
if (rc)
goto out;
/* IE's */
/* bcon 'head IE's are not relevant for 60g band */
/*
* FW do not form regular beacon, so bcon IE's are not set
* For the DMG bcon, when it will be supported, bcon IE's will
* be reused; add something like:
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies);
wil->privacy = info->privacy;
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
break;
case NL80211_HIDDEN_SSID_ZERO_LEN:
hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
break;
case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
break;
default:
rc = -EOPNOTSUPP;
goto out;
}
netif_carrier_on(ndev);
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
channel->hw_value, hidden_ssid);
if (rc)
goto err_pcp_start;
rc = wil_bcast_init(wil);
if (rc)
goto err_bcast;
goto out; /* success */
err_bcast:
wmi_pcp_stop(wil);
err_pcp_start:
netif_carrier_off(ndev);
out:
mutex_unlock(&wil->mutex);
return rc;
}

View File

@ -559,6 +559,8 @@ struct wil6210_priv {
/* profile */
u32 monitor_flags;
u32 privacy; /* secure connection? */
u8 hidden_ssid; /* relevant in AP mode */
u16 channel; /* relevant in AP mode */
int sinfo_gen;
u32 ap_isolate; /* no intra-BSS communication */
/* interrupt moderation */