Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

John W. Linville 2013-08-09 15:07:23 -04:00
commit 2437f3c5d6
14 changed files with 247 additions and 219 deletions

View File

@@ -93,7 +93,7 @@ struct iwl_cfg;
* 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
* capabilities advertized by the fw file (in TLV format).
* 2) The driver layer starts the op_mode (ops->start)
* 3) The op_mode registers registers mac80211
* 3) The op_mode registers mac80211
* 4) The op_mode is governed by mac80211
* 5) The driver layer stops the op_mode
*/
@@ -112,7 +112,7 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD the this Rx responds to.
* HCMD this Rx responds to.
* This callback may sleep, it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
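
To make the lifecycle and callback documentation above concrete, here is a condensed, purely illustrative sketch of how an op_mode hooks into the driver layer. The iwl_example_* callbacks are hypothetical names (their prototypes are omitted); only the iwl_op_mode_ops wiring mirrors this header.

/* Illustrative sketch (not part of this patch): a minimal op_mode ops table.
 * The iwl_example_* functions are placeholders for real callbacks. */
static const struct iwl_op_mode_ops iwl_example_ops = {
	.start		= iwl_example_start,	/* 2) started by iwl-drv.c, 3) registers mac80211 */
	.stop		= iwl_example_stop,	/* 5) must free all memory, may sleep */
	.rx		= iwl_example_rx,	/* Rx notification, threaded IRQ context */
	.queue_full	= iwl_example_queue_full,	/* atomic, BH disabled */
};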

View File

@@ -180,7 +180,7 @@ struct iwl_rx_packet {
* enum CMD_MODE - how to send the host commands ?
*
* @CMD_SYNC: The caller will be stalled until the fw responds to the command
* @CMD_ASYNC: Return right away and don't want for the response
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
*/
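
As an illustration of these flags (a hedged sketch, not code from this series; the command id and payload are placeholders), a synchronous command whose response buffer is needed looks roughly like this:

/* Illustrative sketch: send a command synchronously and keep the response.
 * REPLY_EXAMPLE_CMD and the payload are hypothetical placeholders. */
static int example_send_cmd(struct iwl_trans *trans, void *payload, u16 len)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE_CMD,
		.flags = CMD_SYNC | CMD_WANT_SKB,
		.data = { payload, },
		.len = { len, },
	};
	int ret = iwl_trans_send_cmd(trans, &cmd);

	if (ret)
		return ret;

	/* cmd.resp_pkt now points at the fw response ... */
	iwl_free_resp(&cmd);	/* required whenever CMD_WANT_SKB was set */
	return 0;
}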
@@ -218,7 +218,7 @@ struct iwl_device_cmd {
*
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
* rather copies it to an previously allocated DMA buffer. This flag tells
* rather copies it to a previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
* buffer (that is passed in) instead. This saves the memcpy and allows
* commands that are bigger than the fixed buffer to be submitted.
@@ -243,7 +243,7 @@ enum iwl_hcmd_dataflag {
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
* @len: array of the lenths of the chunks in data
* @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
* @id: id of the host command
*/
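
A hedged sketch of the NOCOPY path described above (the command id and payload pieces are hypothetical): the small header chunk is copied into the command ring, while the large table is DMA-mapped in place instead of being memcpy'd.

/* Illustrative sketch: a two-chunk command, small header copied, large
 * buffer mapped with IWL_HCMD_DFL_NOCOPY. The command id and payload
 * names are placeholders. */
static int example_send_big_cmd(struct iwl_trans *trans,
				void *hdr, u16 hdr_len,
				void *table, u16 table_len)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE_TABLE_CMD,
		.flags = CMD_SYNC,
		.data = { hdr, table, },
		.len = { hdr_len, table_len, },
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
	};

	/* the NOCOPY chunk must stay allocated until the fw responds */
	return iwl_trans_send_cmd(trans, &cmd);
}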
@@ -396,8 +396,6 @@ struct iwl_trans;
* May sleep
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @suspend: stop the device unless WoWLAN is configured
* @resume: resume activity of the device
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
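
For reference, callers reach these hooks through the iwl_trans_*() inline wrappers defined later in this header (one of them is visible further down in this hunk view). A minimal, hypothetical caller would look like the sketch below; CSR_HW_REV is only an example offset.

/* Illustrative sketch (not from this patch): read a CSR through the
 * transport abstraction, without knowing which bus backs it. */
static inline u32 example_read_hw_rev(struct iwl_trans *trans)
{
	return iwl_trans_read32(trans, CSR_HW_REV);
}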
@@ -443,10 +441,7 @@ struct iwl_trans_ops {
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
#ifdef CONFIG_PM_SLEEP
int (*suspend)(struct iwl_trans *trans);
int (*resume)(struct iwl_trans *trans);
#endif
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
@@ -700,18 +695,6 @@ static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
return trans->ops->dbgfs_register(trans, dir);
}
#ifdef CONFIG_PM_SLEEP
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
return trans->ops->suspend(trans);
}
static inline int iwl_trans_resume(struct iwl_trans *trans)
{
return trans->ops->resume(trans);
}
#endif
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trans->ops->write8(trans, ofs, val);

View File

@@ -592,6 +592,142 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
}
#undef BT_MBOX_PRINT
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
le32_to_cpu(_val))
static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
static const char *fmt_table = "\t%-30s %10u\n";
static const char *fmt_header = "%-32s\n";
int pos = 0;
char *buf;
int ret;
int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
sizeof(struct mvm_statistics_rx_non_phy) * 10 +
sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
struct mvm_statistics_rx_phy *ofdm;
struct mvm_statistics_rx_phy *cck;
struct mvm_statistics_rx_non_phy *general;
struct mvm_statistics_rx_ht_phy *ht;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&mvm->mutex);
ofdm = &mvm->rx_stats.ofdm;
cck = &mvm->rx_stats.cck;
general = &mvm->rx_stats.general;
ht = &mvm->rx_stats.ofdm_ht;
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - OFDM");
PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
PRINT_STATS_LE32("rxe_frame_lmt_overrun",
ofdm->rxe_frame_limit_overrun);
PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
PRINT_STATS_LE32("reserved", ofdm->reserved);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - CCK");
PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
PRINT_STATS_LE32("plcp_err", cck->plcp_err);
PRINT_STATS_LE32("crc32_err", cck->crc32_err);
PRINT_STATS_LE32("overrun_err", cck->overrun_err);
PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
PRINT_STATS_LE32("crc32_good", cck->crc32_good);
PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
PRINT_STATS_LE32("rxe_frame_lmt_overrun",
cck->rxe_frame_limit_overrun);
PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
PRINT_STATS_LE32("reserved", cck->reserved);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - GENERAL");
PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
PRINT_STATS_LE32("adc_rx_saturation_time",
general->adc_rx_saturation_time);
PRINT_STATS_LE32("ina_detection_search_time",
general->ina_detection_search_time);
PRINT_STATS_LE32("beacon_silence_rssi_a",
general->beacon_silence_rssi_a);
PRINT_STATS_LE32("beacon_silence_rssi_b",
general->beacon_silence_rssi_b);
PRINT_STATS_LE32("beacon_silence_rssi_c",
general->beacon_silence_rssi_c);
PRINT_STATS_LE32("interference_data_flag",
general->interference_data_flag);
PRINT_STATS_LE32("channel_load", general->channel_load);
PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - HT");
PRINT_STATS_LE32("plcp_err", ht->plcp_err);
PRINT_STATS_LE32("overrun_err", ht->overrun_err);
PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
PRINT_STATS_LE32("crc32_good", ht->crc32_good);
PRINT_STATS_LE32("crc32_err", ht->crc32_err);
PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
mutex_unlock(&mvm->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
#undef PRINT_STATS_LE32
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -924,6 +1060,7 @@ MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
@@ -947,6 +1084,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
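
To exercise the new read-only file once the driver is loaded, a small userspace reader along these lines works; the debugfs path is an assumption (default mount point, example PCI address, usual iwlwifi/iwlmvm layout) and must be adjusted for the actual system.

/* Hypothetical userspace sketch: dump the new fw_rx_stats counters.
 * The path below is an assumption, not dictated by this patch. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/iwlwifi/0000:03:00.0/iwlmvm/fw_rx_stats", "r");

	if (!f) {
		perror("fw_rx_stats");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}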

View File

@@ -508,7 +508,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
/* Allocate resources for the MAC context, and add it the the fw */
/* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
goto out_unlock;
@@ -569,6 +569,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mode(mvm, vif);
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
if (!mvm->bf_allowed_vif &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
@@ -576,10 +580,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
}
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_release;
/*
* P2P_DEVICE interface does not have a channel context assigned to it,
* so a dedicated PHY context is allocated to it and the corresponding
@@ -590,7 +590,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
if (!mvmvif->phy_ctxt) {
ret = -ENOSPC;
goto out_remove_mac;
goto out_free_bf;
}
iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
@@ -614,6 +614,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
out_unref_phy:
iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
out_free_bf:
if (mvm->bf_allowed_vif == mvmvif) {
mvm->bf_allowed_vif = NULL;
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
}
out_remove_mac:
mvmvif->phy_ctxt = NULL;
iwl_mvm_mac_ctxt_remove(mvm, vif);

View File

@@ -421,6 +421,8 @@ struct iwl_mvm {
struct iwl_notif_wait_data notif_wait;
struct mvm_statistics_rx rx_stats;
unsigned long transport_queue_stop;
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];

View File

@@ -440,6 +440,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
else
mvm->pm_ops = &pm_legacy_ops;
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
return op_mode;
out_unregister:

View File

@@ -131,7 +131,7 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd;
struct iwl_time_quota_cmd cmd = {};
int i, idx, ret, num_active_macs, quota, quota_rem;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
@@ -139,15 +139,14 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
.new_vif = newvif,
};
lockdep_assert_held(&mvm->mutex);
/* update all upon completion */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return 0;
BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
lockdep_assert_held(&mvm->mutex);
memset(&cmd, 0, sizeof(cmd));
/* iterator data above must match */
BUILD_BUG_ON(MAX_BINDINGS != 4);
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,

View File

@@ -56,24 +56,30 @@
#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
static u8 rs_ht_to_legacy[] = {
IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
IWL_RATE_6M_INDEX,
IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
[IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
[IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
[IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
[IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
[IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
[IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
[IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
[IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
[IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
[IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
[IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
[IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
[IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
};
static const u8 ant_toggle_lookup[] = {
/*ANT_NONE -> */ ANT_NONE,
/*ANT_A -> */ ANT_B,
/*ANT_B -> */ ANT_C,
/*ANT_AB -> */ ANT_BC,
/*ANT_C -> */ ANT_A,
/*ANT_AC -> */ ANT_AB,
/*ANT_BC -> */ ANT_AC,
/*ANT_ABC -> */ ANT_ABC,
[ANT_NONE] = ANT_NONE,
[ANT_A] = ANT_B,
[ANT_B] = ANT_C,
[ANT_AB] = ANT_BC,
[ANT_C] = ANT_A,
[ANT_AC] = ANT_AB,
[ANT_BC] = ANT_AC,
[ANT_ABC] = ANT_ABC,
};
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
@@ -260,82 +266,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
return (ant_type & valid_antenna) == ant_type;
}
/*
* removes the old data from the statistics. All data that is older than
* TID_MAX_TIME_DIFF, will be deleted.
*/
static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
/* The oldest age we want to keep */
u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
while (tl->queue_count &&
(tl->time_stamp < oldest_time)) {
tl->total -= tl->packet_count[tl->head];
tl->packet_count[tl->head] = 0;
tl->time_stamp += TID_QUEUE_CELL_SPACING;
tl->queue_count--;
tl->head++;
if (tl->head >= TID_QUEUE_MAX_SIZE)
tl->head = 0;
}
}
/*
* increment traffic load value for tid and also remove
* any old values if passed the certain time period
*/
static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
struct ieee80211_hdr *hdr)
{
u32 curr_time = jiffies_to_msecs(jiffies);
u32 time_diff;
s32 index;
struct iwl_traffic_load *tl = NULL;
u8 tid;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
} else {
return IWL_MAX_TID_COUNT;
}
if (unlikely(tid >= IWL_MAX_TID_COUNT))
return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
curr_time -= curr_time % TID_ROUND_VALUE;
/* Happens only for the first packet. Initialize the data */
if (!(tl->queue_count)) {
tl->total = 1;
tl->time_stamp = curr_time;
tl->queue_count = 1;
tl->head = 0;
tl->packet_count[0] = 1;
return IWL_MAX_TID_COUNT;
}
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
index = time_diff / TID_QUEUE_CELL_SPACING;
/* The history is too long: remove data that is older than */
/* TID_MAX_TIME_DIFF */
if (index >= TID_QUEUE_MAX_SIZE)
rs_tl_rm_old_stats(tl, curr_time);
index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
tl->packet_count[index] = tl->packet_count[index] + 1;
tl->total = tl->total + 1;
if ((index + 1) > tl->queue_count)
tl->queue_count = index + 1;
return tid;
}
#ifdef CONFIG_MAC80211_DEBUGFS
/**
* Program the device to use fixed rate for frame transmit
@@ -361,45 +291,11 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
}
#endif
/*
get the traffic load value for tid
*/
static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
u32 curr_time = jiffies_to_msecs(jiffies);
u32 time_diff;
s32 index;
struct iwl_traffic_load *tl = NULL;
if (tid >= IWL_MAX_TID_COUNT)
return 0;
tl = &(lq_data->load[tid]);
curr_time -= curr_time % TID_ROUND_VALUE;
if (!(tl->queue_count))
return 0;
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
index = time_diff / TID_QUEUE_CELL_SPACING;
/* The history is too long: remove data that is older than */
/* TID_MAX_TIME_DIFF */
if (index >= TID_QUEUE_MAX_SIZE)
rs_tl_rm_old_stats(tl, curr_time);
return tl->total;
}
static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
int ret = -EAGAIN;
u32 load;
load = rs_tl_get_load(lq_data, tid);
/*
* Don't create TX aggregation sessions when in high
@@ -2086,6 +1982,22 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
}
static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
struct ieee80211_hdr *hdr)
{
u8 tid = IWL_MAX_TID_COUNT;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
}
if (unlikely(tid > IWL_MAX_TID_COUNT))
tid = IWL_MAX_TID_COUNT;
return tid;
}
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -2129,7 +2041,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
tid = rs_tl_add_packet(lq_sta, hdr);
tid = rs_get_tid(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
tid_data = &sta_priv->tid_data[tid];

View File

@@ -290,17 +290,6 @@ struct iwl_scale_tbl_info {
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
struct iwl_traffic_load {
unsigned long time_stamp; /* age of the oldest statistics */
u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
* slice */
u32 total; /* total num of packets during the
* last TID_MAX_TIME_DIFF */
u8 queue_count; /* number of queues that has
* been used since the last cleanup */
u8 head; /* start of the circular buffer */
};
/**
* struct iwl_lq_sta -- driver's rate scaling private structure
*
@@ -337,7 +326,6 @@ struct iwl_lq_sta {
struct iwl_lq_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;

View File

@@ -167,6 +167,9 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
/*
* iwl_mvm_get_signal_strength - use new rx PHY INFO API
* values are reported by the fw as positive values - need to negate
* to obtain their dBM. Account for missing antennas by replacing 0
* values by -256dBm: practically 0 power and a non-feasible 8 bit value.
*/
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
struct iwl_rx_phy_info *phy_info,
@@ -177,12 +180,15 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
val =
le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
energy_a = -((val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
IWL_RX_INFO_ENERGY_ANT_A_POS);
energy_b = -((val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
IWL_RX_INFO_ENERGY_ANT_B_POS);
energy_c = -((val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
IWL_RX_INFO_ENERGY_ANT_C_POS);
energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
IWL_RX_INFO_ENERGY_ANT_A_POS;
energy_a = energy_a ? -energy_a : -256;
energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
IWL_RX_INFO_ENERGY_ANT_B_POS;
energy_b = energy_b ? -energy_b : -256;
energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
IWL_RX_INFO_ENERGY_ANT_C_POS;
energy_c = energy_c ? -energy_c : -256;
max_energy = max(energy_a, energy_b);
max_energy = max(max_energy, energy_c);
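
A worked example of the decode above (the per-antenna field layout is assumed here: one byte per antenna, antenna A in the lowest byte, since the mask definitions are not part of this hunk): a report of 0x2d on antenna A with B and C absent yields -45 dBm plus two -256 placeholders, so max_energy ends up at -45. A hypothetical helper mirroring the per-antenna step:

/* Hypothetical helper, not part of this patch: returns the negated energy
 * when the antenna field is non-zero, -256 when the antenna is missing.
 * e.g. example_energy(0x2d, 0x000000ff, 0) == -45,
 *      example_energy(0x2d, 0x0000ff00, 8) == -256. */
static inline int example_energy(u32 val, u32 msk, u32 pos)
{
	int energy = (val & msk) >> pos;

	return energy ? -energy : -256;
}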
@@ -378,6 +384,18 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
struct iwl_notif_statistics *stats)
{
/*
* NOTE FW aggregates the statistics - BUT the statistics are cleared
* when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
* bit set.
*/
lockdep_assert_held(&mvm->mutex);
memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
}
/*
* iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
*
@@ -396,6 +414,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
mvm->temperature = le32_to_cpu(common->temperature);
iwl_mvm_tt_handler(mvm);
}
iwl_mvm_update_rx_statistics(mvm, stats);
return 0;
}

View File

@@ -173,7 +173,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
}
/*
* for data packets, rate info comes from the table inside he fw. This
* for data packets, rate info comes from the table inside the fw. This
* table is controlled by LINK_QUALITY commands
*/

View File

@@ -368,21 +368,19 @@ static void iwl_pci_remove(struct pci_dev *pdev)
static int iwl_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
return iwl_trans_suspend(iwl_trans);
return 0;
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
struct iwl_trans *trans = pci_get_drvdata(pdev);
bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -395,7 +393,15 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
return iwl_trans_resume(iwl_trans);
if (!trans->op_mode)
return 0;
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return 0;
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);

View File

@@ -820,25 +820,6 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
return 0;
}
static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
bool hw_rfkill;
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
unsigned long *flags)
{
@@ -1380,10 +1361,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
#ifdef CONFIG_PM_SLEEP
.suspend = iwl_trans_pcie_suspend,
.resume = iwl_trans_pcie_resume,
#endif
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,

View File

@@ -451,13 +451,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return -EINVAL;
}
if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
if (WARN(addr & ~IWL_TX_DMA_MASK,
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
if (unlikely(addr & ~IWL_TX_DMA_MASK))
IWL_ERR(trans, "Unaligned address = %llx\n",
(unsigned long long)addr);
iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
return 0;
@@ -1153,10 +1150,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data point
* @cmd: a point to the ucode command structure
* @cmd: a pointer to the ucode command structure
*
* The function returns < 0 values to indicate the operation is
* failed. On success, it turns the index (> 0) of command in the
* The function returns < 0 values to indicate the operation
* failed. On success, it returns the index (>= 0) of command in the
* command queue.
*/
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
@@ -1631,7 +1628,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* Check here that the packets are in the right place on the ring.
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
WARN_ONCE(txq->ampdu &&
(wifi_seq & 0xff) != q->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
txq_id, wifi_seq, q->write_ptr);
@@ -1663,7 +1660,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
tb1_len = (len + 3) & ~3;
tb1_len = ALIGN(len, 4);
/* Tell NIC about any 2-byte padding after MAC header */
if (tb1_len != len)