Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2025-01-27 14:30:44 +07:00
commit 3f8247c8c4

Merge tag 'iwlwifi-next-for-kalle-2016-10-25-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* Finalize and enable dynamic queue allocation;
* Use dev_coredumpsg() to prevent locking the driver;
* Small fix to pass the AID to the FW;
* Use FW PS decisions with multi-queue;
In iwl-fw-file.h:

@@ -293,6 +293,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
  * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
  * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT

@@ -342,6 +343,7 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
 	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
+	IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
 	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
 	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
 	IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
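For orientation: capability indices like 38 above are bit numbers, not masks, so gaps in the numbering are fine. A minimal sketch of how such an index is typically tested, assuming a multi-word bitmap; the driver's real helper for this is fw_has_capa(), seen in the mac80211.c hunk further down:

#include <linux/bitops.h>

/* illustrative only: the capa_bitmap layout here is an assumption */
static inline bool example_has_capa(const unsigned long *capa_bitmap,
				    iwl_ucode_tlv_capa_t cap)
{
	return test_bit((__force int)cap, capa_bitmap);
}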
In mvm/fw-api-rx.h:

@@ -474,4 +474,30 @@ struct iwl_mvm_internal_rxq_notif {
 	u8 data[];
 } __packed;

+/**
+ * enum iwl_mvm_pm_event - type of station PM event
+ * @IWL_MVM_PM_EVENT_AWAKE: station woke up
+ * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep
+ * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger
+ * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll
+ */
+enum iwl_mvm_pm_event {
+	IWL_MVM_PM_EVENT_AWAKE,
+	IWL_MVM_PM_EVENT_ASLEEP,
+	IWL_MVM_PM_EVENT_UAPSD,
+	IWL_MVM_PM_EVENT_PS_POLL,
+}; /* PEER_PM_NTFY_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_pm_state_notification - station PM state notification
+ * @sta_id: station ID of the station changing state
+ * @type: the new powersave state, see enum iwl_mvm_pm_event above
+ */
+struct iwl_mvm_pm_state_notification {
+	u8 sta_id;
+	u8 type;
+	/* private: */
+	u16 reserved;
+} __packed; /* PEER_PM_NTFY_API_S_VER_1 */
+
 #endif /* __fw_api_rx_h__ */
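The notification body is a fixed four bytes: two u8 fields plus a u16 pad that keeps the structure aligned. A compile-time check along these lines would hold; it is an illustration, not part of the commit:

#include <linux/bug.h>

static inline void example_check_pm_notif_layout(void)
{
	/* sta_id (u8) + type (u8) + reserved (u16) == 4 bytes on the wire */
	BUILD_BUG_ON(sizeof(struct iwl_mvm_pm_state_notification) != 4);
}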
In mvm/fw-api-sta.h:

@@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
  * enum iwl_sta_modify_flag - indicate to the fw which flags are being changed
  * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
  * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
- * @STA_MODIFY_TX_RATE: unused
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
  * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
  * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
  * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count

@@ -189,7 +189,7 @@ enum iwl_sta_key_flag {
 enum iwl_sta_modify_flag {
 	STA_MODIFY_QUEUE_REMOVAL = BIT(0),
 	STA_MODIFY_TID_DISABLE_TX = BIT(1),
-	STA_MODIFY_TX_RATE = BIT(2),
+	STA_MODIFY_UAPSD_ACS = BIT(2),
 	STA_MODIFY_ADD_BA_TID = BIT(3),
 	STA_MODIFY_REMOVE_BA_TID = BIT(4),
 	STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),

The unused STA_MODIFY_TX_RATE flag is retired and its BIT(2) slot reused for STA_MODIFY_UAPSD_ACS.

@@ -353,6 +353,8 @@ struct iwl_mvm_add_sta_cmd_v7 {
  * @beamform_flags: beam forming controls
  * @tfd_queue_msk: tfd queues used by this station
  * @rx_ba_window: aggregation window size
+ * @scd_queue_bank: queue bank in use. Each bank contains 32 queues. 0 means
+ *	that the queues used by this station are in the first 32.
  *
  * The device contains an internal table of per-station information, with info
  * on security keys, aggregation parameters, and Tx rates for initial Tx

@@ -382,7 +384,8 @@ struct iwl_mvm_add_sta_cmd {
 	__le16 beamform_flags;
 	__le32 tfd_queue_msk;
 	__le16 rx_ba_window;
-	__le16 reserved;
+	u8 scd_queue_bank;
+	u8 uapsd_trigger_acs;
 } __packed; /* ADD_STA_CMD_API_S_VER_8 */

 /**
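Replacing the __le16 reserved field with two u8 fields keeps the command size and every other field offset unchanged; only the meaning of those two bytes is new, hence the bump to ADD_STA_CMD_API_S_VER_8. A hypothetical compile-time check (not in the commit) states the invariant:

#include <linux/bug.h>

static inline void example_check_add_sta_layout(void)
{
	/* the new u8 pair occupies exactly the old reserved field's bytes */
	BUILD_BUG_ON(2 * sizeof(u8) != sizeof(__le16));
}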
In mvm/fw-api.h:

@@ -332,6 +332,7 @@ enum iwl_data_path_subcmd_ids {
 	DQA_ENABLE_CMD = 0x0,
 	UPDATE_MU_GROUPS_CMD = 0x1,
 	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+	STA_PM_NOTIF = 0xFD,
 	MU_GROUP_MGMT_NOTIF = 0xFE,
 	RX_QUEUES_NOTIFICATION = 0xFF,
 };
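STA_PM_NOTIF lands at 0xFD, following this enum's convention of keeping commands at the bottom of the group's ID space and notifications at the top. A sketch of how the full wide ID would be composed, assuming the existing iwl_cmd_id() helper from iwl-trans.h:

#include "iwl-trans.h"

static u32 example_sta_pm_notif_id(void)
{
	/* opcode 0xFD within DATA_PATH_GROUP, command version 0 */
	return iwl_cmd_id(STA_PM_NOTIF, DATA_PATH_GROUP, 0);
}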
In mvm/fw-dbg.c:

@@ -70,49 +70,6 @@
 #include "iwl-prph.h"
 #include "iwl-csr.h"

-static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
-				     void *data, size_t datalen)
-{
-	const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
-	ssize_t bytes_read;
-	ssize_t bytes_read_trans;
-
-	if (offset < dump_ptrs->op_mode_len) {
-		bytes_read = min_t(ssize_t, count,
-				   dump_ptrs->op_mode_len - offset);
-		memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
-		       bytes_read);
-		offset += bytes_read;
-		count -= bytes_read;
-
-		if (count == 0)
-			return bytes_read;
-	} else {
-		bytes_read = 0;
-	}
-
-	if (!dump_ptrs->trans_ptr)
-		return bytes_read;
-
-	offset -= dump_ptrs->op_mode_len;
-	bytes_read_trans = min_t(ssize_t, count,
-				 dump_ptrs->trans_ptr->len - offset);
-	memcpy(buffer + bytes_read,
-	       (u8 *)dump_ptrs->trans_ptr->data + offset,
-	       bytes_read_trans);
-
-	return bytes_read + bytes_read_trans;
-}
-
-static void iwl_mvm_free_coredump(void *data)
-{
-	const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
-
-	vfree(fw_error_dump->op_mode_ptr);
-	vfree(fw_error_dump->trans_ptr);
-	kfree(fw_error_dump);
-}
-
 #define RADIO_REG_MAX_READ 0x2ad
 static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
				    struct iwl_fw_error_dump_data **dump_data)
@@ -491,6 +448,43 @@ static u32 iwl_dump_prph(struct iwl_trans *trans,
 	return prph_len;
 }

+/*
+ * alloc_sgtable - allocates a scatterlist table of the given size,
+ * fills it with pages and returns it
+ * @size: the size (in bytes) of the table
+ */
+static struct scatterlist *alloc_sgtable(int size)
+{
+	int alloc_size, nents, i;
+	struct page *new_page;
+	struct scatterlist *iter;
+	struct scatterlist *table;
+
+	nents = DIV_ROUND_UP(size, PAGE_SIZE);
+	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+	sg_init_table(table, nents);
+	iter = table;
+	for_each_sg(table, iter, sg_nents(table), i) {
+		new_page = alloc_page(GFP_KERNEL);
+		if (!new_page) {
+			/* release all previous allocated pages in the table */
+			iter = table;
+			for_each_sg(table, iter, sg_nents(table), i) {
+				new_page = sg_page(iter);
+				if (new_page)
+					__free_page(new_page);
+			}
+			return NULL;
+		}
+		alloc_size = min_t(int, size, PAGE_SIZE);
+		size -= PAGE_SIZE;
+		sg_set_page(iter, new_page, alloc_size, 0);
+	}
+	return table;
+}
+
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 {
 	struct iwl_fw_error_dump_file *dump_file;
@@ -499,6 +493,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	struct iwl_fw_error_dump_mem *dump_mem;
 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
 	struct iwl_mvm_dump_ptrs *fw_error_dump;
+	struct scatterlist *sg_dump_data;
 	u32 sram_len, sram_ofs;
 	struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
 		mvm->fw->dbg_mem_tlv;
@@ -815,8 +810,23 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		file_len += fw_error_dump->trans_ptr->len;
 	dump_file->file_len = cpu_to_le32(file_len);

-	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
-		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
+	sg_dump_data = alloc_sgtable(file_len);
+	if (sg_dump_data) {
+		sg_pcopy_from_buffer(sg_dump_data,
+				     sg_nents(sg_dump_data),
+				     fw_error_dump->op_mode_ptr,
+				     fw_error_dump->op_mode_len, 0);
+		sg_pcopy_from_buffer(sg_dump_data,
+				     sg_nents(sg_dump_data),
+				     fw_error_dump->trans_ptr->data,
+				     fw_error_dump->trans_ptr->len,
+				     fw_error_dump->op_mode_len);
+		dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len,
+			       GFP_KERNEL);
+	}
+	vfree(fw_error_dump->op_mode_ptr);
+	vfree(fw_error_dump->trans_ptr);
+	kfree(fw_error_dump);

 out:
 	iwl_mvm_free_fw_dump_desc(mvm);
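The net effect of the switch: dev_coredumpm() held onto fw_error_dump until userspace read or dropped the dump, pulling data through the read/free callbacks removed above and so pinning driver memory for the dump's lifetime. dev_coredumpsg() takes ownership of a page-backed copy instead, so the driver frees everything as soon as the call returns. A condensed sketch under assumed names (buf, len, dev are placeholders):

/* hedged sketch, not part of the diff */
struct scatterlist *sgt = alloc_sgtable(len);

if (sgt) {
	/* copy the flat dump into the page-backed table ... */
	sg_pcopy_from_buffer(sgt, sg_nents(sgt), buf, len, 0);
	/* ... and hand ownership of the pages to the devcoredump core */
	dev_coredumpsg(dev, sgt, len, GFP_KERNEL);
}
vfree(buf);	/* safe immediately; no callback will touch it later */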
In mvm/mac-ctxt.c:

@@ -499,23 +499,21 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (ret)
 		return ret;

+	/* If DQA is supported - queues will be enabled when needed */
+	if (iwl_mvm_is_dqa_supported(mvm))
+		return 0;
+
 	switch (vif->type) {
 	case NL80211_IFTYPE_P2P_DEVICE:
-		if (!iwl_mvm_is_dqa_supported(mvm))
-			iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-					      IWL_MVM_OFFCHANNEL_QUEUE,
-					      IWL_MVM_TX_FIFO_VO, 0,
-					      wdg_timeout);
+		iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+				      IWL_MVM_OFFCHANNEL_QUEUE,
+				      IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
 		break;
 	case NL80211_IFTYPE_AP:
 		iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
 				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 		/* fall through */
 	default:
-		/* If DQA is supported - queues will be enabled when needed */
-		if (iwl_mvm_is_dqa_supported(mvm))
-			break;
-
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
 					      vif->hw_queue[ac],

@@ -899,9 +897,11 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

-	for (i = 0; i < IEEE80211_NUM_ACS; i++)
-		if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
-			tfd_queue_msk |= BIT(vif->hw_queue[i]);
+	if (!iwl_mvm_is_dqa_supported(mvm)) {
+		for (i = 0; i < IEEE80211_NUM_ACS; i++)
+			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+				tfd_queue_msk |= BIT(vif->hw_queue[i]);
+	}

 	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
 				       MAC_FILTER_IN_CONTROL_AND_MGMT |
In mvm/mac80211.c:

@@ -445,6 +445,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
 	if (iwl_mvm_has_new_rx_api(mvm))
 		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
+		ieee80211_hw_set(hw, AP_LINK_PS);

 	if (mvm->trans->num_rx_queues > 1)
 		ieee80211_hw_set(hw, USES_RSS);
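With AP_LINK_PS set, mac80211 stops inferring a client's power-save state from the PM bit of received frames and expects the driver to report it, which is what the STA_PM_NOTIF handler added below does. Condensed from that handler, the three existing mac80211 entry points involved:

/* sta is the affected client; calls as used by the handler below */
ieee80211_sta_ps_transition(sta, sleeping);	/* sleep/wake change */
ieee80211_sta_pspoll(sta);			/* client sent PS-Poll */
ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);	/* uAPSD trigger */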
@@ -2097,6 +2099,22 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_unbind;

+	/* enable the multicast queue, now that we have a station for it */
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+		struct iwl_trans_txq_scd_cfg cfg = {
+			.fifo = IWL_MVM_TX_FIFO_MCAST,
+			.sta_id = mvmvif->bcast_sta.sta_id,
+			.tid = IWL_MAX_TID_COUNT,
+			.aggregate = false,
+			.frame_limit = IWL_FRAME_LIMIT,
+		};
+
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, wdg_timeout);
+	}
+
 	/* must be set before quota calculations */
 	mvmvif->ap_ibss_active = true;
@@ -2318,10 +2336,9 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
 			  tids, more_data, true);
 }

-static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   enum sta_notify_cmd cmd,
-				   struct ieee80211_sta *sta)
+static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+				     enum sta_notify_cmd cmd,
+				     struct ieee80211_sta *sta)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

@@ -2374,6 +2391,67 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 	spin_unlock_bh(&mvmsta->lock);
 }

+static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   enum sta_notify_cmd cmd,
+				   struct ieee80211_sta *sta)
+{
+	__iwl_mvm_mac_sta_notify(hw, cmd, sta);
+}
+
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
+
+	if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+		return;
+
+	rcu_read_lock();
+	sta = mvm->fw_id_to_mac_id[notif->sta_id];
+	if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	if (!mvmsta->vif ||
+	    mvmsta->vif->type != NL80211_IFTYPE_AP) {
+		rcu_read_unlock();
+		return;
+	}
+
+	if (mvmsta->sleeping != sleeping) {
+		mvmsta->sleeping = sleeping;
+		__iwl_mvm_mac_sta_notify(mvm->hw,
+			sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, sta);
+		ieee80211_sta_ps_transition(sta, sleeping);
+	}
+
+	if (sleeping) {
+		switch (notif->type) {
+		case IWL_MVM_PM_EVENT_AWAKE:
+		case IWL_MVM_PM_EVENT_ASLEEP:
+			break;
+		case IWL_MVM_PM_EVENT_UAPSD:
+			ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
+			break;
+		case IWL_MVM_PM_EVENT_PS_POLL:
+			ieee80211_sta_pspoll(sta);
+			break;
+		default:
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+}
+
 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_sta *sta)
In mvm/mvm.h:

@@ -1111,9 +1111,8 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
 static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
 {
-	/* Make sure DQA isn't allowed in driver until feature is complete */
-	return false && fw_has_capa(&mvm->fw->ucode_capa,
-				    IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
 }

 static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)

The hard-coded "false &&" gate that kept DQA disabled during development is dropped, so support now depends only on the firmware capability bit.

@@ -1418,6 +1417,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 				    struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
 			       struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
 				 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
In mvm/ops.c:

@@ -306,6 +306,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
 	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
 		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
+	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
+		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
 };
 #undef RX_HANDLER
 #undef RX_HANDLER_GRP

@@ -452,6 +454,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
 static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
 	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
 	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+	HCMD_NAME(STA_PM_NOTIF),
 	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
 	HCMD_NAME(RX_QUEUES_NOTIFICATION),
 };
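One detail worth noting, as a comment; the ordering requirement is an assumption carried over from the "keep this array sorted" convention documented on the driver's other HCMD name tables:

/*
 * iwl_mvm_data_path_names[] stays sorted by opcode: STA_PM_NOTIF (0xFD)
 * slots in ahead of MU_GROUP_MGMT_NOTIF (0xFE) and RX_QUEUES_NOTIFICATION
 * (0xFF), keeping ID-to-name lookup's ascending-order assumption intact.
 */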
In mvm/sta.c:

@@ -202,6 +202,20 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
 	add_sta_cmd.station_flags |=
 		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+	add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+
+	if (sta->wme) {
+		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
+
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
+	}

 	status = ADD_STA_SUCCESS;
 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
|
||||
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
|
||||
queue, ret);
|
||||
else
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
|
||||
queue, tid);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
mvm->queue_info[queue].txq_tid = tid;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
|
||||
queue, tid);
|
||||
}
|
||||
|
||||
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
|
||||
@ -1010,6 +1029,7 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
|
||||
local_bh_disable();
|
||||
spin_lock(&mvmsta->lock);
|
||||
skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
|
||||
mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
|
||||
spin_unlock(&mvmsta->lock);
|
||||
|
||||
while ((skb = __skb_dequeue(&deferred_tx)))
|
||||
@ -1489,12 +1509,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
||||
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
|
||||
|
||||
/* If DQA is supported - the queues can be disabled now */
|
||||
if (iwl_mvm_is_dqa_supported(mvm)) {
|
||||
if (iwl_mvm_is_dqa_supported(mvm))
|
||||
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
|
||||
|
||||
/* If there is a TXQ still marked as reserved - free it */
|
||||
if (iwl_mvm_is_dqa_supported(mvm) &&
|
||||
mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
|
||||
u8 reserved_txq = mvm_sta->reserved_queue;
|
||||
enum iwl_mvm_queue_status *status;
|
||||
|
||||
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
|
||||
|
||||
/*
|
||||
* If no traffic has gone through the reserved TXQ - it
|
||||
* is still marked as IWL_MVM_QUEUE_RESERVED, and
|
||||
|
In mvm/sta.h:

@@ -436,6 +436,7 @@ struct iwl_mvm_sta {
 	bool disable_tx;
 	bool tlc_amsdu;
+	bool sleeping;
 	u8 agg_tids;
 	u8 sleep_tx_count;
 	u8 avg_energy;
In pcie/trans.c:

@@ -1598,6 +1598,29 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
 	}
 }

+static const char *queue_name(struct device *dev,
+			      struct iwl_trans_pcie *trans_p, int i)
+{
+	if (trans_p->shared_vec_mask) {
+		int vec = trans_p->shared_vec_mask &
+			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+		if (i == 0)
+			return DRV_NAME ": shared IRQ";
+
+		return devm_kasprintf(dev, GFP_KERNEL,
+				      DRV_NAME ": queue %d", i + vec);
+	}
+	if (i == 0)
+		return DRV_NAME ": default queue";
+
+	if (i == trans_p->alloc_vecs - 1)
+		return DRV_NAME ": exception";
+
+	return devm_kasprintf(dev, GFP_KERNEL,
+			      DRV_NAME ": queue %d", i);
+}
+
 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 				      struct iwl_trans_pcie *trans_pcie)
 {
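Assuming DRV_NAME expands to "iwlwifi", the per-vector names this helper produces would show up in /proc/interrupts roughly as follows (illustrative, non-shared-vector case with four vectors):

iwlwifi: default queue
iwlwifi: queue 1
iwlwifi: queue 2
iwlwifi: exception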
@@ -1606,6 +1629,10 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
 		int ret;
 		struct msix_entry *msix_entry;
+		const char *qname = queue_name(&pdev->dev, trans_pcie, i);
+
+		if (!qname)
+			return -ENOMEM;

 		msix_entry = &trans_pcie->msix_entries[i];
 		ret = devm_request_threaded_irq(&pdev->dev,

@@ -1615,7 +1642,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 						iwl_pcie_irq_msix_handler :
 						iwl_pcie_irq_rx_msix_handler,
 						IRQF_SHARED,
-						DRV_NAME,
+						qname,
 						msix_entry);
 		if (ret) {
 			IWL_ERR(trans_pcie->trans,