Fourth set of iwlwifi patches intended for 4.20
* Support for a new scan type;
* Clean-up in the queue handling code;
* A few bug fixes;

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEF3LNfgb2BPWm68smoUecoho8xfoFAlvAXKkACgkQoUecoho8
xfqiVBAAu3plnGNNclDGmclBpDXydbUn+gDlxTtR/W7p5lGMK6bDr1Y2Wrn9S1KW
aDD7YS4DSp60DW5gAUPypKByAHpY7XK1QQ3tyeNy8Qfw2rW6+XYw5HC7vWMgHwvD
cbVZsKV+VdAfP38BD7vfZXkLhAzIG58jYPoOHhZ7mHgRkndXaNwPgn+cN9bFIIEX
wWI3SwvfUalM5SeHnjURlxO18kg3kcc+O8hFslwVA/IvKECNrrtqvcB4rcYGwWBh
py4o6kc3m/pLW055XEEhJWi5quu2hIKA0z9NYqsPhMjdtpSVZLhvwRlrbStW0LcF
qFH3f+rkl1Acp7SSZtScz8Vyor4v9grOTt2OtZJg0nIWEbu9hjl72KasxvCTECBd
P7HjO4dbogUSEtDIdazyS2X9Gx2tnMOXe7g9xXzyyLoo8gNfnhTR8+IldjbjjmhE
UrAKsxn1nQocsTht2hm04K646beeitXrs+/EKwOwaLwrJhRedPI79PVLxR5kvPiA
+JYf9xHO4+usv/MVCrdni8rwup3i3gwjRZKQPwY3rNA/ec+2txvFP7tIfosUzCm3
KNjX1n6hj2hGIEVTNGn9oYjW6BJcWTPGCRbuPCII4GVFnc/V2g4X6SD6IP8igUmB
Arns9ENTP9a9OJH10ENxVmU2EEtGfqGs1c/1/yufMtQeGm2sEUc=
=etyT
-----END PGP SIGNATURE-----

Merge tag 'iwlwifi-next-for-kalle-2018-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Fourth set of iwlwifi patches intended for 4.20

* Support for a new scan type;
* Clean-up in the queue handling code;
* A few bug fixes;

commit 12f7a18674
@@ -1154,14 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
 }
 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
 
-void iwl_fw_error_dump_wk(struct work_struct *work)
+/* this function assumes dump_start was called beforehand and dump_end will be
+ * called afterwards
+ */
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
 {
-	struct iwl_fw_runtime *fwrt =
-		container_of(work, struct iwl_fw_runtime, dump.wk.work);
 	struct iwl_fw_dbg_params params = {0};
 
-	if (fwrt->ops && fwrt->ops->dump_start &&
-	    fwrt->ops->dump_start(fwrt->ops_ctx))
+	if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
 		return;
 
 	if (fwrt->ops && fwrt->ops->fw_running &&
@@ -1169,7 +1169,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
 		iwl_fw_free_dump_desc(fwrt);
 		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
-		goto out;
+		return;
 	}
 
 	iwl_fw_dbg_stop_recording(fwrt, &params);
@@ -1183,7 +1183,20 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 		udelay(500);
 		iwl_fw_dbg_restart_recording(fwrt, &params);
 	}
-out:
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
+
+void iwl_fw_error_dump_wk(struct work_struct *work)
+{
+	struct iwl_fw_runtime *fwrt =
+		container_of(work, struct iwl_fw_runtime, dump.wk.work);
+
+	if (fwrt->ops && fwrt->ops->dump_start &&
+	    fwrt->ops->dump_start(fwrt->ops_ctx))
+		return;
+
+	iwl_fw_dbg_collect_sync(fwrt);
+
 	if (fwrt->ops && fwrt->ops->dump_end)
 		fwrt->ops->dump_end(fwrt->ops_ctx);
 }
@@ -367,4 +367,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
 #endif /* __iwl_fw_dbg_h__ */
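The refactor above splits the worker body out into iwl_fw_dbg_collect_sync(), so callers that already hold the op-mode mutex (iwl_mvm_stop_device, later in this series) can collect debug data synchronously, while the worker keeps bracketing collection with dump_start()/dump_end(). A minimal standalone sketch of that calling contract, with toy types standing in for the driver's (illustration only, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

struct ops {
	/* returns nonzero when collection must be skipped, like dump_start */
	int (*dump_start)(void *ctx);
	void (*dump_end)(void *ctx);
};

/* stands in for iwl_fw_dbg_collect_sync(): does the actual collection */
static void collect_sync(void)
{
	puts("collecting firmware debug data");
}

/* stands in for iwl_fw_error_dump_wk(): the asynchronous worker path */
static void error_dump_wk(const struct ops *ops, void *ctx)
{
	if (ops && ops->dump_start && ops->dump_start(ctx))
		return;
	collect_sync();
	if (ops && ops->dump_end)
		ops->dump_end(ctx);
}

static int start(void *ctx) { puts("dump_start"); return 0; }
static void end(void *ctx) { puts("dump_end"); }

int main(void)
{
	struct ops ops = { .dump_start = start, .dump_end = end };

	error_dump_wk(&ops, NULL); /* worker path: start, collect, end */
	collect_sync();            /* direct path: caller already holds the lock */
	return 0;
}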
@@ -30,38 +30,20 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM iwlwifi_data
 
-TRACE_EVENT(iwlwifi_dev_tx_data,
-	TP_PROTO(const struct device *dev,
-		 struct sk_buff *skb, u8 hdr_len),
-	TP_ARGS(dev, skb, hdr_len),
+TRACE_EVENT(iwlwifi_dev_tx_tb,
+	TP_PROTO(const struct device *dev, struct sk_buff *skb,
+		 u8 *data_src, size_t data_len),
+	TP_ARGS(dev, skb, data_src, data_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
 
 		__dynamic_array(u8, data,
-				iwl_trace_data(skb) ? skb->len - hdr_len : 0)
+				iwl_trace_data(skb) ? data_len : 0)
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
 		if (iwl_trace_data(skb))
-			skb_copy_bits(skb, hdr_len,
-				      __get_dynamic_array(data),
-				      skb->len - hdr_len);
-	),
-	TP_printk("[%s] TX frame data", __get_str(dev))
-);
-
-TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
-	TP_PROTO(const struct device *dev,
-		 u8 *data_src, size_t data_len),
-	TP_ARGS(dev, data_src, data_len),
-	TP_STRUCT__entry(
-		DEV_ENTRY
-
-		__dynamic_array(u8, data, data_len)
-	),
-	TP_fast_assign(
-		DEV_ASSIGN;
-		memcpy(__get_dynamic_array(data), data_src, data_len);
+			memcpy(__get_dynamic_array(data), data_src, data_len);
 	),
 	TP_printk("[%s] TX frame data", __get_str(dev))
 );
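The hunk above folds the old iwlwifi_dev_tx_data and iwlwifi_dev_tx_tso_chunk events into one iwlwifi_dev_tx_tb event that always takes an explicit (data_src, data_len) pair plus the skb used for the payload-tracing decision. A toy userspace model of that call shape (illustration only; the device name and sizes are made up, and the real event uses the kernel tracepoint machinery):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sk_buff {
	const unsigned char *data;
	size_t len;
	bool trace_payload; /* stands in for iwl_trace_data(skb) */
};

static void trace_dev_tx_tb(const char *dev, const struct sk_buff *skb,
			    const void *data_src, size_t data_len)
{
	/* the real event only copies the payload when iwl_trace_data(skb) */
	printf("[%s] TX frame data: %zu bytes %s\n", dev, data_len,
	       skb->trace_payload ? "captured" : "skipped");
	(void)data_src;
}

int main(void)
{
	unsigned char payload[64] = { 0 };
	struct sk_buff skb = { payload, sizeof(payload), true };

	/* head/fragment case: pointer derived from the skb itself */
	trace_dev_tx_tb("0000:01:00.0", &skb, skb.data + 24, skb.len - 24);
	/* TSO case: pointer into a separately built chunk buffer */
	trace_dev_tx_tb("0000:01:00.0", &skb, payload, 32);
	return 0;
}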
@@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 {
 	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
 	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 	struct wowlan_key_data key_data = {
-		.configure_keys = !d0i3,
+		.configure_keys = !d0i3 && !unified,
 		.use_rsc_tsc = false,
 		.tkip = &tkip_cmd,
 		.use_tkip = false,
@@ -1636,32 +1638,10 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
 }
 
 static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
 {
-	u32 base = mvm->error_event_table[0];
-	struct error_table_start {
-		/* cf. struct iwl_error_event_table */
-		u32 valid;
-		u32 error_id;
-	} err_info;
 	int ret;
 
-	iwl_trans_read_mem_bytes(mvm->trans, base,
-				 &err_info, sizeof(err_info));
-
-	if (err_info.valid) {
-		IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
-			 err_info.valid, err_info.error_id);
-		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-			struct cfg80211_wowlan_wakeup wakeup = {
-				.rfkill_release = true,
-			};
-			ieee80211_report_wowlan_wakeup(vif, &wakeup,
-						       GFP_KERNEL);
-		}
-		return ERR_PTR(-EIO);
-	}
-
 	/* only for tracing for now */
 	ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
 	if (ret)
@@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 	bool keep;
 	struct iwl_mvm_sta *mvm_ap_sta;
 
-	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+	fw_status = iwl_mvm_get_wakeup_status(mvm);
 	if (IS_ERR_OR_NULL(fw_status))
 		goto out_unlock;
 
@@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
 	u32 reasons = 0;
 	int i, j, n_matches, ret;
 
-	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+	fw_status = iwl_mvm_get_wakeup_status(mvm);
 	if (!IS_ERR_OR_NULL(fw_status)) {
 		reasons = le32_to_cpu(fw_status->wakeup_reasons);
 		kfree(fw_status);
@@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
 		ieee80211_resume_disconnect(vif);
 }
 
+static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif)
+{
+	u32 base = mvm->error_event_table[0];
+	struct error_table_start {
+		/* cf. struct iwl_error_event_table */
+		u32 valid;
+		u32 error_id;
+	} err_info;
+
+	iwl_trans_read_mem_bytes(mvm->trans, base,
+				 &err_info, sizeof(err_info));
+
+	if (err_info.valid &&
+	    err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+		struct cfg80211_wowlan_wakeup wakeup = {
+			.rfkill_release = true,
+		};
+		ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+	}
+	return err_info.valid;
+}
+
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 {
 	struct ieee80211_vif *vif = NULL;
@@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	/* query SRAM first in case we want event logging */
 	iwl_mvm_read_d3_sram(mvm);
 
+	if (iwl_mvm_check_rt_status(mvm, vif)) {
+		set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+		iwl_mvm_dump_nic_error_log(mvm);
+		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+					NULL, 0);
+		ret = 1;
+		goto err;
+	}
+
 	if (d0i3_first) {
 		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
 		if (ret < 0) {
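The d3.c hunks above move the rf-kill wakeup detection out of iwl_mvm_get_wakeup_status() and into iwl_mvm_check_rt_status(), which __iwl_mvm_resume() now runs once right after reading SRAM, treating any valid error-table entry as a firmware error. A compact sketch of that decision; note the indicator value below is a mocked placeholder, not the driver's real constant:

#include <stdio.h>

/* placeholder value; the real RF_KILL_INDICATOR_FOR_WOWLAN lives in the
 * driver's firmware API headers */
#define RF_KILL_INDICATOR_FOR_WOWLAN 0x3D

struct error_table_start {
	unsigned int valid;
	unsigned int error_id;
};

static int check_rt_status(const struct error_table_start *err_info)
{
	if (err_info->valid &&
	    err_info->error_id == RF_KILL_INDICATOR_FOR_WOWLAN)
		puts("report rfkill_release wakeup to cfg80211");
	return err_info->valid;
}

int main(void)
{
	struct error_table_start err = {
		.valid = 1,
		.error_id = RF_KILL_INDICATOR_FOR_WOWLAN,
	};

	if (check_rt_status(&err))
		puts("firmware hit an error in D3: trigger dump, resume fails");
	return 0;
}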
@@ -364,7 +364,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	 */
 
 	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
-	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+	/*
+	 * Set a 'fake' TID for the command queue, since we use the
+	 * hweight() of the tid_bitmap as a refcount now. Not that
+	 * we ever even consider the command queue as one we might
+	 * want to reuse, but be safe nevertheless.
+	 */
+	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
+		BIT(IWL_MAX_TID_COUNT + 2);
 
 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
 		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
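The fw.c hunk above stops seeding hw_queue_refcount for the command queue and instead sets a fake TID in tid_bitmap, because the queue code now derives the refcount from the population count (hweight) of that bitmap. A quick demonstration of the equivalence, assuming IWL_MAX_TID_COUNT is 8 as in the driver headers:

#include <stdio.h>

#define IWL_MAX_TID_COUNT 8 /* assumption: matches the driver definition */
#define BIT(n) (1UL << (n))

/* the kernel's hweight_long(); __builtin_popcountl stands in here */
static unsigned int hweight(unsigned long x)
{
	return (unsigned int)__builtin_popcountl(x);
}

int main(void)
{
	unsigned long cmd_queue_tid_bitmap = BIT(IWL_MAX_TID_COUNT + 2);

	/* one set bit is equivalent to the old hw_queue_refcount = 1 */
	printf("command queue refcount = %u\n", hweight(cmd_queue_tid_bitmap));
	return 0;
}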
@@ -512,6 +512,7 @@ enum iwl_mvm_scan_type {
 	IWL_SCAN_TYPE_WILD,
 	IWL_SCAN_TYPE_MILD,
 	IWL_SCAN_TYPE_FRAGMENTED,
+	IWL_SCAN_TYPE_FAST_BALANCE,
 };
 
 enum iwl_mvm_sched_scan_pass_all_states {
@@ -753,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
  *	This is a state in which a single queue serves more than one TID, all of
  *	which are not aggregated. Note that the queue is only associated to one
  *	RA.
- * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
- *	This is a state of a queue that has had traffic on it, but during the
- *	last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
- *	it. In this state, when a new queue is needed to be allocated but no
- *	such free queue exists, an inactive queue might be freed and given to
- *	the new RA/TID.
- * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
- *	This is the state of a queue that has had traffic pass through it, but
- *	needs to be reconfigured for some reason, e.g. the queue needs to
- *	become unshared and aggregations re-enabled on.
  */
 enum iwl_mvm_queue_status {
 	IWL_MVM_QUEUE_FREE,
 	IWL_MVM_QUEUE_RESERVED,
 	IWL_MVM_QUEUE_READY,
 	IWL_MVM_QUEUE_SHARED,
-	IWL_MVM_QUEUE_INACTIVE,
-	IWL_MVM_QUEUE_RECONFIGURING,
 };
 
 #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
@@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile {
 	u8 values[ACPI_GEO_TABLE_SIZE];
 };
 
+struct iwl_mvm_dqa_txq_info {
+	u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+	bool reserved; /* Is this the TXQ reserved for a STA */
+	u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+	u8 txq_tid; /* The TID "owner" of this queue*/
+	u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+	/* Timestamp for inactivation per TID of this queue */
+	unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+	enum iwl_mvm_queue_status status;
+};
+
 struct iwl_mvm {
 	/* for logger access */
 	struct device *dev;
@@ -843,17 +843,7 @@ struct iwl_mvm {
 
 	u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
 
-	struct {
-		u8 hw_queue_refcount;
-		u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
-		bool reserved; /* Is this the TXQ reserved for a STA */
-		u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
-		u8 txq_tid; /* The TID "owner" of this queue*/
-		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
-		/* Timestamp for inactivation per TID of this queue */
-		unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
-		enum iwl_mvm_queue_status status;
-	} queue_info[IWL_MAX_HW_QUEUES];
+	struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
 	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
 	struct work_struct add_stream_wk; /* To add streams to queues */
 
@@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
 		mvmvif->low_latency &= ~cause;
 }
 
-/* hw scheduler queue config */
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
-			unsigned int wdg_timeout);
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
-			    u8 sta_id, u8 tid, unsigned int timeout);
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
-
 /* Return a bitmask with all the hw supported queues, except for the
  * command queue, which can't be flushed.
  */
@@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
+	lockdep_assert_held(&mvm->mutex);
+	/* calling this function without using dump_start/end since at this
+	 * point we already hold the op mode mutex
+	 */
+	iwl_fw_dbg_collect_sync(&mvm->fwrt);
 	iwl_fw_cancel_timestamp(&mvm->fwrt);
 	iwl_free_fw_paging(&mvm->fwrt);
 	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
 
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
-
 #define MVM_TCM_PERIOD_MSEC 500
 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
 #define MVM_LL_PERIOD (10 * HZ)
@@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
 		.suspend_time = 95,
 		.max_out_time = 44,
 	},
+	[IWL_SCAN_TYPE_FAST_BALANCE] = {
+		.suspend_time = 30,
+		.max_out_time = 37,
+	},
 };
 
 struct iwl_mvm_scan_params {
@@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
 	return mvm->tcm.result.band_load[band];
 }
 
+struct iwl_is_dcm_with_go_iterator_data {
+	struct ieee80211_vif *current_vif;
+	bool is_dcm_with_p2p_go;
+};
+
+static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct iwl_is_dcm_with_go_iterator_data *data = _data;
+	struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_vif *curr_mvmvif =
+		iwl_mvm_vif_from_mac80211(data->current_vif);
+
+	/* exclude the given vif */
+	if (vif == data->current_vif)
+		return;
+
+	if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+	    other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+	    other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+		data->is_dcm_with_p2p_go = true;
+}
+
 static enum
-iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif,
 					 enum iwl_mvm_traffic_load load,
 					 bool low_latency)
 {
@@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
 	if (!global_cnt)
 		return IWL_SCAN_TYPE_UNASSOC;
 
-	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
-	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
-		return IWL_SCAN_TYPE_FRAGMENTED;
+	if (fw_has_api(&mvm->fw->ucode_capa,
+		       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+		if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+		    (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
+			return IWL_SCAN_TYPE_FRAGMENTED;
+
+		/* in case of DCM with GO where BSS DTIM interval < 220msec
+		 * set all scan requests as fast-balance scan
+		 * */
+		if (vif && vif->type == NL80211_IFTYPE_STATION &&
+		    vif->bss_conf.dtim_period < 220) {
+			struct iwl_is_dcm_with_go_iterator_data data = {
+				.current_vif = vif,
+				.is_dcm_with_p2p_go = false,
+			};
+
+			ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						IEEE80211_IFACE_ITER_NORMAL,
+						iwl_mvm_is_dcm_with_go_iterator,
+						&data);
+			if (data.is_dcm_with_p2p_go)
+				return IWL_SCAN_TYPE_FAST_BALANCE;
+		}
+	}
 
 	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
 		return IWL_SCAN_TYPE_MILD;
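Putting the hunk above together, the selection order with the new type is roughly as follows; a standalone sketch with the driver's inputs reduced to booleans (not the real function or its exact conditions):

#include <stdbool.h>
#include <stdio.h>

enum scan_type { UNASSOC, FRAGMENTED, FAST_BALANCE, MILD, WILD };

static enum scan_type pick_scan_type(bool associated, bool frag_scan_api,
				     bool high_load_or_low_lat, bool p2p_device,
				     bool dcm_with_p2p_go_short_dtim,
				     bool medium_load_or_low_lat)
{
	if (!associated)
		return UNASSOC;
	if (frag_scan_api) {
		if (high_load_or_low_lat && !p2p_device)
			return FRAGMENTED;
		/* station sharing the radio with a P2P GO on another
		 * phy context and a short DTIM interval */
		if (dcm_with_p2p_go_short_dtim)
			return FAST_BALANCE;
	}
	if (medium_load_or_low_lat)
		return MILD;
	return WILD;
}

int main(void)
{
	printf("%d\n", pick_scan_type(true, true, false, false, true, false));
	return 0; /* prints 2, i.e. FAST_BALANCE */
}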
@@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
 }
 
 static enum
-iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif)
 {
 	enum iwl_mvm_traffic_load load;
 	bool low_latency;
@@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
 	load = iwl_mvm_get_traffic_load(mvm);
 	low_latency = iwl_mvm_low_latency(mvm);
 
-	return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+	return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
 }
 
 static enum
 iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
-					     bool p2p_device,
+					     struct ieee80211_vif *vif,
 					     enum nl80211_band band)
 {
 	enum iwl_mvm_traffic_load load;
@@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
 	load = iwl_mvm_get_traffic_load_band(mvm, band);
 	low_latency = iwl_mvm_low_latency_band(mvm, band);
 
-	return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+	return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
 }
 
 static int
@@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
 		params->scan_plans[0].iterations == 1;
 }
 
+static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
+{
+	return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+		type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 				   struct iwl_mvm_scan_params *params,
 				   struct ieee80211_vif *vif)
@@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+	if (iwl_mvm_is_scan_fragmented(params->type))
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
 	if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 
 	if (iwl_mvm_is_regular_scan(params) &&
 	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
-	    params->type != IWL_SCAN_TYPE_FRAGMENTED)
+	    !iwl_mvm_is_scan_fragmented(params->type))
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
 
 	return flags;
@@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
 static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
 					u32 flags, u8 channel_flags)
 {
-	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
 	struct iwl_scan_config_v1 *cfg = config;
 
 	cfg->flags = cpu_to_le32(flags);
@@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
 	if (iwl_mvm_is_cdb_supported(mvm)) {
 		enum iwl_mvm_scan_type lb_type, hb_type;
 
-		lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+		lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
 						     NL80211_BAND_2GHZ);
-		hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+		hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
 						     NL80211_BAND_5GHZ);
 
 		cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
@@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
 			cpu_to_le32(scan_timing[hb_type].suspend_time);
 	} else {
 		enum iwl_mvm_scan_type type =
-			iwl_mvm_get_scan_type(mvm, false);
+			iwl_mvm_get_scan_type(mvm, NULL);
 
 		cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
 			cpu_to_le32(scan_timing[type].max_out_time);
@@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 		return -ENOBUFS;
 
 	if (iwl_mvm_is_cdb_supported(mvm)) {
-		type = iwl_mvm_get_scan_type_band(mvm, false,
+		type = iwl_mvm_get_scan_type_band(mvm, NULL,
 						  NL80211_BAND_2GHZ);
-		hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+		hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
 						     NL80211_BAND_5GHZ);
 		if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
 			return 0;
 	} else {
-		type = iwl_mvm_get_scan_type(mvm, false);
+		type = iwl_mvm_get_scan_type(mvm, NULL);
 		if (type == mvm->scan_type)
 			return 0;
 	}
@@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 			 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
 			 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
 			 SCAN_CONFIG_N_CHANNELS(num_channels) |
-			 (type == IWL_SCAN_TYPE_FRAGMENTED ?
+			 (iwl_mvm_is_scan_fragmented(type) ?
 			  SCAN_CONFIG_FLAG_SET_FRAGMENTED :
 			  SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
 
@@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 	 */
 	if (iwl_mvm_cdb_scan_api(mvm)) {
 		if (iwl_mvm_is_cdb_supported(mvm))
-			flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+			flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
 				 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
 				 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
 		iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
@@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+	if (iwl_mvm_is_scan_fragmented(params->type))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
 	if (iwl_mvm_is_cdb_supported(mvm) &&
-	    params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+	    iwl_mvm_is_scan_fragmented(params->hb_type))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
 
 	if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 	 */
 	if (iwl_mvm_is_regular_scan(params) &&
 	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
-	    params->type != IWL_SCAN_TYPE_FRAGMENTED &&
+	    !iwl_mvm_is_scan_fragmented(params->type) &&
 	    !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
 	    !iwl_mvm_is_oce_supported(mvm))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
@@ -1589,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
 
 static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
 				   struct iwl_mvm_scan_params *params,
-				   bool p2p)
+				   struct ieee80211_vif *vif)
 {
 	if (iwl_mvm_is_cdb_supported(mvm)) {
 		params->type =
-			iwl_mvm_get_scan_type_band(mvm, p2p,
+			iwl_mvm_get_scan_type_band(mvm, vif,
 						   NL80211_BAND_2GHZ);
 		params->hb_type =
-			iwl_mvm_get_scan_type_band(mvm, p2p,
+			iwl_mvm_get_scan_type_band(mvm, vif,
 						   NL80211_BAND_5GHZ);
 	} else {
-		params->type = iwl_mvm_get_scan_type(mvm, p2p);
+		params->type = iwl_mvm_get_scan_type(mvm, vif);
 	}
 }
 
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   struct cfg80211_scan_request *req,
 			   struct ieee80211_scan_ies *ies)
@@ -1649,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	params.scan_plans = &scan_plan;
 	params.n_scan_plans = 1;
 
-	iwl_mvm_fill_scan_type(mvm, &params,
-			       vif->type == NL80211_IFTYPE_P2P_DEVICE);
+	iwl_mvm_fill_scan_type(mvm, &params, vif);
 
 	ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
 	if (ret < 0)
@@ -1745,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 	params.n_scan_plans = req->n_scan_plans;
 	params.scan_plans = req->scan_plans;
 
-	iwl_mvm_fill_scan_type(mvm, &params,
-			       vif->type == NL80211_IFTYPE_P2P_DEVICE);
+	iwl_mvm_fill_scan_type(mvm, &params, vif);
 
 	/* In theory, LMAC scans can handle a 32-bit delay, but since
 	 * waiting for over 18 hours to start the scan is a bit silly
(File diff suppressed because it is too large.)
@@ -312,9 +312,6 @@ enum iwl_mvm_agg_state {
  *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
  *	we are ready to finish the Tx AGG stop / start flow.
  * @tx_time: medium time consumed by this A-MPDU
- * @is_tid_active: has this TID sent traffic in the last
- *	%IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
- *	field should be ignored.
 * @tpt_meas_start: time of the throughput measurements start, is reset every HZ
 * @tx_count_last: number of frames transmitted during the last second
 * @tx_count: counts the number of frames transmitted since the last reset of
@@ -332,7 +329,6 @@ struct iwl_mvm_tid_data {
 	u16 txq_id;
 	u16 ssn;
 	u16 tx_time;
-	bool is_tid_active;
 	unsigned long tpt_meas_start;
 	u32 tx_count_last;
 	u32 tx_count;
@@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
-			       int ac, int ssn, unsigned int wdg_timeout,
-			       bool force);
-
 #endif /* __sta_h__ */
@@ -1140,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
 	/* Check if TXQ needs to be allocated or re-activated */
-	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
-		     !mvmsta->tid_data[tid].is_tid_active)) {
-		/* If TXQ needs to be allocated... */
-		if (txq_id == IWL_MVM_INVALID_QUEUE) {
-			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
+		iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
 
 		/*
 		 * The frame is now deferred, and the worker scheduled
 		 * will re-allocate it, so we can free it for now.
 		 */
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		spin_unlock(&mvmsta->lock);
 		return 0;
-		}
-
-		/* queue should always be active in new TX path */
-		WARN_ON(iwl_mvm_has_new_tx_api(mvm));
-
-		/* If we are here - TXQ exists and needs to be re-activated */
-		spin_lock(&mvm->queue_info_lock);
-		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
-		mvmsta->tid_data[tid].is_tid_active = true;
-		spin_unlock(&mvm->queue_info_lock);
-
-		IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
-				    txq_id);
 	}
 
 	if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -599,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 		iwl_mvm_dump_umac_error_log(mvm);
 }
 
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
-{
-	int i;
-
-	lockdep_assert_held(&mvm->queue_info_lock);
-
-	/* This should not be hit with new TX path */
-	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
-		return -ENOSPC;
-
-	/* Start by looking for a free queue */
-	for (i = minq; i <= maxq; i++)
-		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
-		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
-			return i;
-
-	/*
-	 * If no free queue found - settle for an inactive one to reconfigure
-	 * Make sure that the inactive queue either already belongs to this STA,
-	 * or that if it belongs to another one - it isn't the reserved queue
-	 */
-	for (i = minq; i <= maxq; i++)
-		if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
-		    (sta_id == mvm->queue_info[i].ra_sta_id ||
-		     !mvm->queue_info[i].reserved))
-			return i;
-
-	return -ENOSPC;
-}
-
 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 			 int tid, int frame_limit, u16 ssn)
 {
@@ -649,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 		return -EINVAL;
 
 	spin_lock_bh(&mvm->queue_info_lock);
-	if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
 		 "Trying to reconfig unallocated queue %d\n", queue)) {
 		spin_unlock_bh(&mvm->queue_info_lock);
 		return -ENXIO;
@@ -665,229 +635,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 	return ret;
 }
 
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
-				       int mac80211_queue, u8 sta_id, u8 tid)
-{
-	bool enable_queue = true;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-
-	/* Make sure this TID isn't already enabled */
-	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
-		spin_unlock_bh(&mvm->queue_info_lock);
-		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
-			queue, tid);
-		return false;
-	}
-
-	/* Update mappings and refcounts */
-	if (mvm->queue_info[queue].hw_queue_refcount > 0)
-		enable_queue = false;
-
-	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
-		WARN(mac80211_queue >=
-		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
-		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
-		     mac80211_queue, queue, sta_id, tid);
-		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
-	}
-
-	mvm->queue_info[queue].hw_queue_refcount++;
-	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
-	mvm->queue_info[queue].ra_sta_id = sta_id;
-
-	if (enable_queue) {
-		if (tid != IWL_MAX_TID_COUNT)
-			mvm->queue_info[queue].mac80211_ac =
-				tid_to_mac80211_ac[tid];
-		else
-			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
-
-		mvm->queue_info[queue].txq_tid = tid;
-	}
-
-	IWL_DEBUG_TX_QUEUES(mvm,
-			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
-			    queue, mvm->queue_info[queue].hw_queue_refcount,
-			    mvm->hw_queue_to_mac80211[queue]);
-
-	spin_unlock_bh(&mvm->queue_info_lock);
-
-	return enable_queue;
-}
-
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
-			    u8 sta_id, u8 tid, unsigned int timeout)
-{
-	int queue, size = IWL_DEFAULT_QUEUE_SIZE;
-
-	if (tid == IWL_MAX_TID_COUNT) {
-		tid = IWL_MGMT_TID;
-		size = IWL_MGMT_QUEUE_SIZE;
-	}
-	queue = iwl_trans_txq_alloc(mvm->trans,
-				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
-				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);
-
-	if (queue < 0) {
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
-				    sta_id, tid, queue);
-		return queue;
-	}
-
-	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
-			    queue, sta_id, tid);
-
-	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
-	IWL_DEBUG_TX_QUEUES(mvm,
-			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
-			    queue, mvm->hw_queue_to_mac80211[queue]);
-
-	return queue;
-}
-
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
-			unsigned int wdg_timeout)
-{
-	struct iwl_scd_txq_cfg_cmd cmd = {
-		.scd_queue = queue,
-		.action = SCD_CFG_ENABLE_QUEUE,
-		.window = cfg->frame_limit,
-		.sta_id = cfg->sta_id,
-		.ssn = cpu_to_le16(ssn),
-		.tx_fifo = cfg->fifo,
-		.aggregate = cfg->aggregate,
-		.tid = cfg->tid,
-	};
-	bool inc_ssn;
-
-	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
-		return false;
-
-	/* Send the enabling command if we need to */
-	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
-					cfg->sta_id, cfg->tid))
-		return false;
-
-	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
-					   NULL, wdg_timeout);
-	if (inc_ssn)
-		le16_add_cpu(&cmd.ssn, 1);
-
-	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
-	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
-
-	return inc_ssn;
-}
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			u8 tid, u8 flags)
-{
-	struct iwl_scd_txq_cfg_cmd cmd = {
-		.scd_queue = queue,
-		.action = SCD_CFG_DISABLE_QUEUE,
-	};
-	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
-	int ret;
-
-	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
-		return -EINVAL;
-
-	if (iwl_mvm_has_new_tx_api(mvm)) {
-		spin_lock_bh(&mvm->queue_info_lock);
-
-		if (remove_mac_queue)
-			mvm->hw_queue_to_mac80211[queue] &=
-				~BIT(mac80211_queue);
-
-		spin_unlock_bh(&mvm->queue_info_lock);
-
-		iwl_trans_txq_free(mvm->trans, queue);
-
-		return 0;
-	}
-
-	spin_lock_bh(&mvm->queue_info_lock);
-
-	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
-		spin_unlock_bh(&mvm->queue_info_lock);
-		return 0;
-	}
-
-	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
-
-	/*
-	 * If there is another TID with the same AC - don't remove the MAC queue
-	 * from the mapping
-	 */
-	if (tid < IWL_MAX_TID_COUNT) {
-		unsigned long tid_bitmap =
-			mvm->queue_info[queue].tid_bitmap;
-		int ac = tid_to_mac80211_ac[tid];
-		int i;
-
-		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
-			if (tid_to_mac80211_ac[i] == ac)
-				remove_mac_queue = false;
-		}
-	}
-
-	if (remove_mac_queue)
-		mvm->hw_queue_to_mac80211[queue] &=
-			~BIT(mac80211_queue);
-	mvm->queue_info[queue].hw_queue_refcount--;
-
-	cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
-		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
-	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
-		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
-
-	IWL_DEBUG_TX_QUEUES(mvm,
-			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
-			    queue,
-			    mvm->queue_info[queue].hw_queue_refcount,
-			    mvm->hw_queue_to_mac80211[queue]);
-
-	/* If the queue is still enabled - nothing left to do in this func */
-	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
-		spin_unlock_bh(&mvm->queue_info_lock);
-		return 0;
-	}
-
-	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
-	cmd.tid = mvm->queue_info[queue].txq_tid;
-
-	/* Make sure queue info is correct even though we overwrite it */
-	WARN(mvm->queue_info[queue].hw_queue_refcount ||
-	     mvm->queue_info[queue].tid_bitmap ||
-	     mvm->hw_queue_to_mac80211[queue],
-	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
-	     queue, mvm->queue_info[queue].hw_queue_refcount,
-	     mvm->hw_queue_to_mac80211[queue],
-	     mvm->queue_info[queue].tid_bitmap);
-
-	/* If we are here - the queue is freed and we can zero out these vals */
-	mvm->queue_info[queue].hw_queue_refcount = 0;
-	mvm->queue_info[queue].tid_bitmap = 0;
-	mvm->hw_queue_to_mac80211[queue] = 0;
-
-	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
-	mvm->queue_info[queue].reserved = false;
-
-	spin_unlock_bh(&mvm->queue_info_lock);
-
-	iwl_trans_txq_disable(mvm->trans, queue, false);
-	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
-				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
-
-	if (ret)
-		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
-			queue, ret);
-	return ret;
-}
-
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
  * @sync: This command can be sent synchronously.
@@ -1255,171 +1002,6 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	ieee80211_connection_loss(vif);
 }
 
-/*
- * Remove inactive TIDs of a given queue.
- * If all queue TIDs are inactive - mark the queue as inactive
- * If only some the queue TIDs are inactive - unmap them from the queue
- */
-static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
-					 struct iwl_mvm_sta *mvmsta, int queue,
-					 unsigned long tid_bitmap)
-{
-	int tid;
-
-	lockdep_assert_held(&mvmsta->lock);
-	lockdep_assert_held(&mvm->queue_info_lock);
-
-	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
-		return;
-
-	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
-	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		/* If some TFDs are still queued - don't mark TID as inactive */
-		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
-			tid_bitmap &= ~BIT(tid);
-
-		/* Don't mark as inactive any TID that has an active BA */
-		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
-			tid_bitmap &= ~BIT(tid);
-	}
-
-	/* If all TIDs in the queue are inactive - mark queue as inactive. */
-	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
-		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
-
-		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
-			mvmsta->tid_data[tid].is_tid_active = false;
-
-		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
-				    queue);
-		return;
-	}
-
-	/*
-	 * If we are here, this is a shared queue and not all TIDs timed-out.
-	 * Remove the ones that did.
-	 */
-	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
-
-		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
-		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
-		mvm->queue_info[queue].hw_queue_refcount--;
-		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
-		mvmsta->tid_data[tid].is_tid_active = false;
-
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "Removing inactive TID %d from shared Q:%d\n",
-				    tid, queue);
-	}
-
-	IWL_DEBUG_TX_QUEUES(mvm,
-			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
-			    mvm->queue_info[queue].tid_bitmap);
-
-	/*
-	 * There may be different TIDs with the same mac queues, so make
-	 * sure all TIDs have existing corresponding mac queues enabled
-	 */
-	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
-	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		mvm->hw_queue_to_mac80211[queue] |=
-			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
-	}
-
-	/* If the queue is marked as shared - "unshare" it */
-	if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
-	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
-		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
-		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
-				    queue);
-	}
-}
-
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
-{
-	unsigned long timeout_queues_map = 0;
-	unsigned long now = jiffies;
-	int i;
-
-	if (iwl_mvm_has_new_tx_api(mvm))
-		return;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
-		if (mvm->queue_info[i].hw_queue_refcount > 0)
-			timeout_queues_map |= BIT(i);
-	spin_unlock_bh(&mvm->queue_info_lock);
-
-	rcu_read_lock();
-
-	/*
-	 * If a queue time outs - mark it as INACTIVE (don't remove right away
-	 * if we don't have to.) This is an optimization in case traffic comes
-	 * later, and we don't HAVE to use a currently-inactive queue
-	 */
-	for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
-		struct ieee80211_sta *sta;
-		struct iwl_mvm_sta *mvmsta;
-		u8 sta_id;
-		int tid;
-		unsigned long inactive_tid_bitmap = 0;
-		unsigned long queue_tid_bitmap;
-
-		spin_lock_bh(&mvm->queue_info_lock);
-		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
-
-		/* If TXQ isn't in active use anyway - nothing to do here... */
-		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
-		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
-			spin_unlock_bh(&mvm->queue_info_lock);
-			continue;
-		}
-
-		/* Check to see if there are inactive TIDs on this queue */
-		for_each_set_bit(tid, &queue_tid_bitmap,
-				 IWL_MAX_TID_COUNT + 1) {
-			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
-				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
-				continue;
-
-			inactive_tid_bitmap |= BIT(tid);
-		}
-		spin_unlock_bh(&mvm->queue_info_lock);
-
-		/* If all TIDs are active - finish check on this queue */
-		if (!inactive_tid_bitmap)
-			continue;
-
-		/*
-		 * If we are here - the queue hadn't been served recently and is
-		 * in use
-		 */
-
-		sta_id = mvm->queue_info[i].ra_sta_id;
-		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-
-		/*
-		 * If the STA doesn't exist anymore, it isn't an error. It could
-		 * be that it was removed since getting the queues, and in this
-		 * case it should've inactivated its queues anyway.
-		 */
-		if (IS_ERR_OR_NULL(sta))
-			continue;
-
-		mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-		spin_lock_bh(&mvmsta->lock);
-		spin_lock(&mvm->queue_info_lock);
-		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
-					     inactive_tid_bitmap);
-		spin_unlock(&mvm->queue_info_lock);
-		spin_unlock_bh(&mvmsta->lock);
-	}
-
-	rcu_read_unlock();
-}
-
 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
 					  struct ieee80211_vif *vif,
 					  const struct ieee80211_sta *sta,
@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
|
|||||||
goto out_err;
|
goto out_err;
|
||||||
}
|
}
|
||||||
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
|
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
|
||||||
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
|
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
|
||||||
/* add this subframe's headers' length to the tx_cmd */
|
/* add this subframe's headers' length to the tx_cmd */
|
||||||
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
|
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
|
||||||
|
|
||||||
@@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 			goto out_err;
 		}
 		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
-		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
-					       tb_len);
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+					tb_len);
 
 		data_left -= tb_len;
 		tso_build_data(skb, &tso, tb_len);
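Both hunks above make the same substitution: the TSO-only trace_iwlwifi_dev_tx_tso_chunk call becomes trace_iwlwifi_dev_tx_tb, which additionally receives the skb, so every transmit buffer (TB) can be correlated with the frame it belongs to. A rough user-space sketch of the per-TB hook idea; all names here are illustrative stand-ins, not the real iwlwifi trace API:

/* Sketch: one trace hook per TB, keyed to its owning skb. */
#include <stdio.h>
#include <stddef.h>

struct sk_buff { const char *name; };	/* stand-in */

static void trace_dev_tx_tb(const struct sk_buff *skb,
			    const void *data, size_t len)
{
	/* the real tracepoint records the skb, TB address and length */
	printf("tx_tb: skb=%s data=%p len=%zu\n",
	       skb->name, (void *)data, len);
}

int main(void)
{
	struct sk_buff skb = { "frame0" };
	char hdr[16], payload[64];

	/* header TBs and payload TBs now go through the same hook */
	trace_dev_tx_tb(&skb, hdr, sizeof(hdr));
	trace_dev_tx_tb(&skb, payload, sizeof(payload));
	return 0;
}

Carrying the skb lets a trace consumer group all TBs of one frame together, which the per-chunk TSO-only trace point could not do.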
@@ -438,6 +438,9 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 			return -ENOMEM;
 		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
 					      skb_frag_size(frag));
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+					skb_frag_address(frag),
+					skb_frag_size(frag));
 		if (tb_idx < 0)
 			return tb_idx;
 
@@ -454,7 +457,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 					    struct sk_buff *skb,
 					    struct iwl_cmd_meta *out_meta,
 					    int hdr_len,
-					    int tx_cmd_len)
+					    int tx_cmd_len,
+					    bool pad)
 {
 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
@@ -478,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
 	      IWL_FIRST_TB_SIZE;
 
-	tb1_len = ALIGN(len, 4);
+	if (pad)
+		tb1_len = ALIGN(len, 4);
+	else
+		tb1_len = len;
 
 	/* map the data for TB1 */
 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -486,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 		goto out_err;
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
 
 	/* set up TFD's third entry to point to remainder of skb's head */
 	tb2_len = skb_headlen(skb) - hdr_len;
@@ -496,15 +505,14 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 			goto out_err;
 		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+					skb->data + hdr_len,
+					tb2_len);
 	}
 
 	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
 		goto out_err;
 
-	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
-			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
-
 	return tfd;
 
 out_err:
@@ -551,7 +559,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 						    out_meta, hdr_len, len);
 
 	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
-				      hdr_len, len);
+				      hdr_len, len, !amsdu);
 }
 
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
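Taken together, the gen2 hunks above thread a new `pad` flag into iwl_pcie_gen2_build_tx(): TB1 is rounded up to a 4-byte boundary only when `pad` is set, and the caller passes `!amsdu`, so A-MSDU frames keep the exact length (plausibly because rounding would upset the byte-exact subframe accounting; the diff itself only shows that A-MSDUs skip the rounding). A self-contained sketch of the length decision; the sample values are made up:

/* Sketch of the conditional TB1 padding (ALIGN as in the kernel). */
#include <stdio.h>
#include <stdbool.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int tb1_len(unsigned int len, bool pad)
{
	/* pad == true: round TB1 up to a 4-byte boundary;
	 * pad == false (the A-MSDU case): use the exact length. */
	return pad ? ALIGN(len, 4) : len;
}

int main(void)
{
	printf("%u %u\n", tb1_len(45, true), tb1_len(45, false)); /* 48 45 */
	return 0;
}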
@@ -1994,6 +1994,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 					     head_tb_len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 			return -EINVAL;
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+					skb->data + hdr_len,
+					head_tb_len);
 		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
 	}
 
@@ -2011,6 +2014,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 			return -EINVAL;
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+					skb_frag_address(frag),
+					skb_frag_size(frag));
 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
 						skb_frag_size(frag), false);
 		if (tb_idx < 0)
@@ -2190,8 +2196,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		}
 		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
 				       hdr_tb_len, false);
-		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
-					       hdr_tb_len);
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+					hdr_tb_len);
 		/* add this subframe's headers' length to the tx_cmd */
 		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
 
@@ -2216,8 +2222,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 
 		iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
 				       size, false);
-		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
-					       size);
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+					size);
 
 		data_left -= size;
 		tso_build_data(skb, &tso, size);
@@ -2398,6 +2404,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		goto out_err;
 	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 
+	trace_iwlwifi_dev_tx(trans->dev, skb,
+			     iwl_pcie_get_tfd(trans, txq,
+					      txq->write_ptr),
+			     trans_pcie->tfd_size,
+			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+			     hdr_len);
+
 	/*
 	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
 	 * (adding subframes, etc.).
@@ -2421,14 +2434,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 						       out_meta)))
 				goto out_err;
 		}
-
-		trace_iwlwifi_dev_tx(trans->dev, skb,
-				     iwl_pcie_get_tfd(trans, txq,
-						      txq->write_ptr),
-				     trans_pcie->tfd_size,
-				     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
-				     hdr_len);
-		trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
 	}
 
 	/* building the A-MSDU might have changed this data, so memcpy it now */
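These last two hunks hoist the one-shot trace_iwlwifi_dev_tx call in iwl_trans_pcie_tx() to just after TB1 is built, so it fires once for both the A-MSDU and the regular path instead of only in the non-A-MSDU branch; trace_iwlwifi_dev_tx_data disappears, its role taken over by the per-TB traces added earlier. A compact control-flow sketch with stand-in names, not the driver's real functions:

/* Sketch of the reordered trace: the one-shot TFD trace now precedes
 * the amsdu/non-amsdu split instead of living in the else-branch. */
#include <stdio.h>
#include <stdbool.h>

static void trace_dev_tx(const char *what) { printf("trace: %s\n", what); }

static void build_tx(bool amsdu)
{
	/* ... build TB1 ... */
	trace_dev_tx("tfd");		/* fires for both paths now */

	if (amsdu)
		printf("fill A-MSDU TBs\n");
	else
		printf("fill regular TBs\n");
	/* previously: trace_dev_tx() ran only in the else-branch */
}

int main(void)
{
	build_tx(true);
	build_tx(false);
	return 0;
}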