// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 *
 * Transmit and frame generation functions.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <asm/unaligned.h>
#include <net/fq_impl.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "wme.h"
#include "rate.h"

/* misc utils */

static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_packets++;
	tstats->tx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}
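
/*
 * Compute the Duration/ID field for a data or management frame: zero
 * for group-addressed frames and for QoS frames with the NoAck policy,
 * otherwise the airtime needed for the expected ACK exchange (and, for
 * fragmented frames, for the next fragment as well), following the
 * control-response rate rules of IEEE 802.11, Ch. 9.6.
 */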
static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
				 struct sk_buff *skb, int group_addr,
				 int next_frag_len)
{
	int rate, mrate, erp, dur, i, shift = 0;
	struct ieee80211_rate *txrate;
	struct ieee80211_local *local = tx->local;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_chanctx_conf *chanctx_conf;
	u32 rate_flags = 0;

	/* assume HW handles this */
	if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
		return 0;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
	if (chanctx_conf) {
		shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
		rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
	}
	rcu_read_unlock();

	/* uh huh? */
	if (WARN_ON_ONCE(tx->rate.idx < 0))
		return 0;

	sband = local->hw.wiphy->bands[info->band];
	txrate = &sband->bitrates[tx->rate.idx];

	erp = txrate->flags & IEEE80211_RATE_ERP_G;

	/*
	 * data and mgmt (except PS Poll):
	 * - during CFP: 32768
	 * - during contention period:
	 *   if addr1 is group address: 0
	 *   if more fragments = 0 and addr1 is individual address: time to
	 *      transmit one ACK plus SIFS
	 *   if more fragments = 1 and addr1 is individual address: time to
	 *      transmit next fragment plus 2 x ACK plus 3 x SIFS
	 *
	 * IEEE 802.11, 9.6:
	 * - control response frame (CTS or ACK) shall be transmitted using the
	 *   same rate as the immediately previous frame in the frame exchange
	 *   sequence, if this rate belongs to the PHY mandatory rates, or else
	 *   at the highest possible rate belonging to the PHY rates in the
	 *   BSSBasicRateSet
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_ctl(hdr->frame_control)) {
		/* TODO: These control frames are not currently sent by
		 * mac80211, but should they be implemented, this function
		 * needs to be updated to support duration field calculation.
		 *
		 * RTS: time needed to transmit pending data/mgmt frame plus
		 *      one CTS frame plus one ACK frame plus 3 x SIFS
		 * CTS: duration of immediately previous RTS minus time
		 *      required to transmit CTS and its SIFS
		 * ACK: 0 if immediately previous directed data/mgmt had
		 *      more=0, with more=1 duration in ACK frame is duration
		 *      from previous frame minus time needed to transmit ACK
		 *      and its SIFS
		 * PS Poll: BIT(15) | BIT(14) | aid
		 */
		return 0;
	}

	/* data/mgmt */
	if (0 /* FIX: data/mgmt during CFP */)
		return cpu_to_le16(32768);

	if (group_addr) /* Group address as the destination - no ACK */
		return 0;

	/* Individual destination address:
	 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
	 * CTS and ACK frames shall be transmitted using the highest rate in
	 * basic rate set that is less than or equal to the rate of the
	 * immediately previous frame and that is using the same modulation
	 * (CCK or OFDM). If no basic rate set matches with these requirements,
	 * the highest mandatory rate of the PHY that is less than or equal to
	 * the rate of the previous frame is used.
	 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
	 */
	rate = -1;
	/* use lowest available if everything fails */
	mrate = sband->bitrates[0].bitrate;
	for (i = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *r = &sband->bitrates[i];

		if (r->bitrate > txrate->bitrate)
			break;

		if ((rate_flags & r->flags) != rate_flags)
			continue;

		if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
			rate = DIV_ROUND_UP(r->bitrate, 1 << shift);

		switch (sband->band) {
		case NL80211_BAND_2GHZ: {
			u32 flag;

			if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
				flag = IEEE80211_RATE_MANDATORY_G;
			else
				flag = IEEE80211_RATE_MANDATORY_B;
			if (r->flags & flag)
				mrate = r->bitrate;
			break;
		}
		case NL80211_BAND_5GHZ:
		case NL80211_BAND_6GHZ:
			if (r->flags & IEEE80211_RATE_MANDATORY_A)
				mrate = r->bitrate;
			break;
		case NL80211_BAND_S1GHZ:
		case NL80211_BAND_60GHZ:
			/* TODO, for now fall through */
		case NUM_NL80211_BANDS:
			WARN_ON(1);
			break;
		}
	}
	if (rate == -1) {
		/* No matching basic rate found; use highest suitable mandatory
		 * PHY rate */
		rate = DIV_ROUND_UP(mrate, 1 << shift);
	}

	/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
	if (ieee80211_is_data_qos(hdr->frame_control) &&
	    *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		dur = 0;
	else
		/* Time needed to transmit ACK
		 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
		 * to closest integer */
		dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble,
				shift);

	if (next_frag_len) {
		/* Frame is fragmented: duration increases with time needed to
		 * transmit next fragment plus ACK and 2 x SIFS. */
		dur *= 2; /* ACK + SIFS */
		/* next fragment */
		dur += ieee80211_frame_duration(sband->band, next_frag_len,
				txrate->bitrate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble,
				shift);
	}

	return cpu_to_le16(dur);
}

/* tx handlers */
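/*
 * Each handler below is run in sequence by invoke_tx_handlers(); a
 * handler returns TX_CONTINUE to pass the frame to the next handler,
 * TX_DROP to discard it, or TX_QUEUED when it has consumed the skb
 * (e.g. by buffering it for later delivery).
 */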
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
	struct ieee80211_local *local = tx->local;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	/* driver doesn't support power save */
	if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
		return TX_CONTINUE;

	/* hardware does dynamic power save */
	if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
		return TX_CONTINUE;

	/* dynamic power save disabled */
	if (local->hw.conf.dynamic_ps_timeout <= 0)
		return TX_CONTINUE;

	/* we are scanning, don't enable power save */
	if (local->scanning)
		return TX_CONTINUE;

	if (!local->ps_sdata)
		return TX_CONTINUE;

	/* No point if we're going to suspend */
	if (local->quiescing)
		return TX_CONTINUE;

	/* dynamic ps is supported only in managed mode */
	if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
		return TX_CONTINUE;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
		return TX_CONTINUE;

	ifmgd = &tx->sdata->u.mgd;

	/*
	 * Don't wake up from power save if U-APSD is enabled, the VO AC
	 * has U-APSD enabled and the frame is in the VO class. This
	 * effectively means that even if all access categories have
	 * U-APSD enabled, in practice U-APSD is only used with the VO AC.
	 * This is a workaround for the case when received VO class
	 * packets do not have a correct QoS tag for some reason, due to
	 * the network or the peer application.
	 *
	 * Note: ifmgd->uapsd_queues access is racy here. If the value is
	 * changed via debugfs, user needs to reassociate manually to have
	 * everything in sync.
	 */
	if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
	    (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
	    skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
		return TX_CONTINUE;

	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		ieee80211_stop_queues_by_reason(&local->hw,
						IEEE80211_MAX_QUEUE_MAP,
						IEEE80211_QUEUE_STOP_REASON_PS,
						false);
		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
		ieee80211_queue_work(&local->hw,
				     &local->dynamic_ps_disable_work);
	}

	/* Don't restart the timer if we're not associated */
	if (!ifmgd->associated)
		return TX_CONTINUE;

	mod_timer(&local->dynamic_ps_timer, jiffies +
		  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));

	return TX_CONTINUE;
}
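
/*
 * Drop frames that cannot be sent in the current association state:
 * data to stations we are not associated with, frames that would go
 * out off-channel during a software scan, and multicast data when no
 * interface has any multicast receivers.
 */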
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	bool assoc = false;

	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
		return TX_CONTINUE;

	if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
	    test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
	    !ieee80211_is_probe_req(hdr->frame_control) &&
	    !ieee80211_is_any_nullfunc(hdr->frame_control))
		/*
		 * When software scanning, only nullfunc frames (to notify
		 * the AP of the sleep state) and probe requests (for the
		 * active scan) are allowed, all other frames should not be
		 * sent and we should not get here, but if we do
		 * nonetheless, drop them to avoid sending them
		 * off-channel. See the link below and
		 * ieee80211_start_scan() for more.
		 *
		 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
		 */
		return TX_DROP;

	if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
		return TX_CONTINUE;

	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
		return TX_CONTINUE;

	if (tx->sta)
		assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);

	if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
		if (unlikely(!assoc &&
			     ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			sdata_info(tx->sdata,
				   "dropped data frame to not associated station %pM\n",
				   hdr->addr1);
#endif
			I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
			return TX_DROP;
		}
	} else if (unlikely(ieee80211_is_data(hdr->frame_control) &&
			    ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) {
		/*
		 * No associated STAs - no need to send multicast
		 * frames.
		 */
		return TX_DROP;
	}

	return TX_CONTINUE;
}
/* This function is called whenever the AP is about to exceed the maximum limit
 * of buffered frames for power saving STAs. This situation should not really
 * happen often during normal operation, so dropping the oldest buffered packet
 * from each queue should be OK to make some room for new frames. */
static void purge_old_ps_buffers(struct ieee80211_local *local)
{
	int total = 0, purged = 0;
	struct sk_buff *skb;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		struct ps_data *ps;

		if (sdata->vif.type == NL80211_IFTYPE_AP)
			ps = &sdata->u.ap.ps;
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			ps = &sdata->u.mesh.ps;
		else
			continue;

		skb = skb_dequeue(&ps->bc_buf);
		if (skb) {
			purged++;
			ieee80211_free_txskb(&local->hw, skb);
		}
		total += skb_queue_len(&ps->bc_buf);
	}

	/*
	 * Drop one frame from each station from the lowest-priority
	 * AC that has frames at all.
	 */
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		int ac;

		for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
			skb = skb_dequeue(&sta->ps_tx_buf[ac]);
			total += skb_queue_len(&sta->ps_tx_buf[ac]);
			if (skb) {
				purged++;
				ieee80211_free_txskb(&local->hw, skb);
				break;
			}
		}
	}

	local->total_ps_buffered = total;
	ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
}
static ieee80211_tx_result
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ps_data *ps;

	/*
	 * broadcast/multicast frame
	 *
	 * If any of the associated/peer stations is in power save mode,
	 * the frame is buffered to be sent after DTIM beacon frame.
	 * This is done either by the hardware or us.
	 */

	/* powersaving STAs currently only in AP/VLAN/mesh mode */
	if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
	    tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		if (!tx->sdata->bss)
			return TX_CONTINUE;

		ps = &tx->sdata->bss->ps;
	} else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
		ps = &tx->sdata->u.mesh.ps;
	} else {
		return TX_CONTINUE;
	}

	/* no buffering for ordered frames */
	if (ieee80211_has_order(hdr->frame_control))
		return TX_CONTINUE;

	if (ieee80211_is_probe_req(hdr->frame_control))
		return TX_CONTINUE;

	if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
		info->hw_queue = tx->sdata->vif.cab_queue;

	/* no stations in PS mode and no buffered packets */
	if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
		return TX_CONTINUE;

	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;

	/* device releases frame after DTIM beacon */
	if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
		return TX_CONTINUE;

	/* buffered in mac80211 */
	if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
		purge_old_ps_buffers(tx->local);

	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
		ps_dbg(tx->sdata,
		       "BC TX buffer full - dropping the oldest frame\n");
		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
	} else
		tx->local->total_ps_buffered++;

	skb_queue_tail(&ps->bc_buf, tx->skb);

	return TX_QUEUED;
}
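
/*
 * Management frame protection (IEEE 802.11w): a management frame must
 * be encrypted when the peer STA negotiated MFP and the frame is a
 * robust management frame.
 */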
static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
			     struct sk_buff *skb)
{
	if (!ieee80211_is_mgmt(fc))
		return 0;

	if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
		return 0;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return 0;

	return 1;
}
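
/*
 * Buffer unicast frames for stations in power save mode in per-AC
 * queues, dropping the oldest frame when a queue overflows. Buffering
 * is synchronized with ieee80211_sta_ps_deliver_wakeup() via
 * sta->ps_lock so a concurrent wakeup cannot strand a frame, and the
 * TIM bit is recalculated after queueing.
 */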
static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct sta_info *sta = tx->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_local *local = tx->local;

	if (unlikely(!sta))
		return TX_CONTINUE;

	if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
		      test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
		      test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
		int ac = skb_get_queue_mapping(tx->skb);

		if (ieee80211_is_mgmt(hdr->frame_control) &&
		    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			return TX_CONTINUE;
		}

		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
		       sta->sta.addr, sta->sta.aid, ac);
		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
			purge_old_ps_buffers(tx->local);

		/* sync with ieee80211_sta_ps_deliver_wakeup */
		spin_lock(&sta->ps_lock);
		/*
		 * STA woke up in the meantime and all the frames on ps_tx_buf
		 * have been queued to the pending queue. No reordering can
		 * happen, go ahead and Tx the packet.
		 */
		if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
			spin_unlock(&sta->ps_lock);
			return TX_CONTINUE;
		}

		if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);

			ps_dbg(tx->sdata,
			       "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
			       sta->sta.addr, ac);
			ieee80211_free_txskb(&local->hw, old);
		} else
			tx->local->total_ps_buffered++;

		info->control.jiffies = jiffies;
		info->control.vif = &tx->sdata->vif;
		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
		skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
		spin_unlock(&sta->ps_lock);

		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));

		/*
		 * We queued up some frames, so the TIM bit might
		 * need to be set, recalculate it.
		 */
		sta_info_recalc_tim(sta);

		return TX_QUEUED;
	} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
		ps_dbg(tx->sdata,
		       "STA %pM in PS mode, but polling/in SP -> send frame\n",
		       sta->sta.addr);
	}

	return TX_CONTINUE;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_UNICAST)
		return ieee80211_tx_h_unicast_ps_buf(tx);
	else
		return ieee80211_tx_h_multicast_ps_buf(tx);
}
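
/*
 * Frames of the configured control port protocol (e.g. EAPOL) are
 * flagged so they can bypass encryption when required and are sent at
 * the minimum rate to make the handshake as robust as possible.
 */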
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
		if (tx->sdata->control_port_no_encrypt)
			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
		info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
	}

	return TX_CONTINUE;
}
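
/*
 * Select the key used to encrypt this frame. Keys are looked up with
 * rcu_dereference(); the TX path runs inside an rcu_read_lock()
 * section, so a key that is removed concurrently stays valid until we
 * are done with it.
 */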
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
	struct ieee80211_key *key;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
		tx->key = NULL;
		return TX_CONTINUE;
	}

	if (tx->sta &&
	    (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
|
[MAC80211]: fix race conditions with keys
During receive processing, we select the key long before using it and
because there's no locking it is possible that we kfree() the key
after having selected it but before using it for crypto operations.
Obviously, this is bad.
Secondly, during transmit processing, there are two possible races: We
have a similar race between select_key() and using it for encryption,
but we also have a race here between select_key() and hardware
encryption (both when a key is removed.)
This patch solves these issues by using RCU: when a key is to be freed,
we first remove the pointer from the appropriate places (sdata->keys,
sdata->default_key, sta->key) using rcu_assign_pointer() and then
synchronize_rcu(). Then, we can safely kfree() the key and remove it
from the hardware. There's a window here where the hardware may still
be using it for decryption, but we can't work around that without having
two hardware callbacks, one to disable the key for RX and one to disable
it for TX; but the worst thing that will happen is that we receive a
packet decrypted that we don't find a key for any more and then drop it.
When we add a key, we first need to upload it to the hardware and then,
using rcu_assign_pointer() again, link it into our structures.
In the code using keys (TX/RX paths) we use rcu_dereference() to get the
key and enclose the whole tx/rx section in a rcu_read_lock() ...
rcu_read_unlock() block. Because we've uploaded the key to hardware
before linking it into internal structures, we can guarantee that it is
valid once get to into tx().
One possible race condition remains, however: when we have hardware
acceleration enabled and the driver shuts down the queues, we end up
queueing the frame. If now somebody removes the key, the key will be
removed from hwaccel and then then driver will be asked to encrypt the
frame with a key index that has been removed. Hence, drivers will need
to be aware that the hw_key_index they are passed might not be under
all circumstances. Most drivers will, however, simply ignore that
condition and encrypt the frame with the selected key anyway, this
only results in a frame being encrypted with a wrong key or dropped
(rightfully) because the key was not valid. There isn't much we can
do about it unless we want to walk the pending frame queue every time
a key is removed and remove all frames that used it.
This race condition, however, will most likely be solved once we add
multiqueue support to mac80211 because then frames will be queued
further up the stack instead of after being processed.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Acked-by: Michael Wu <flamingice@sourmilk.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-09-14 22:10:24 +07:00
|
|
|
tx->key = key;
|
2016-06-22 17:55:20 +07:00
|
|
|
else if (ieee80211_is_group_privacy_action(tx->skb) &&
|
|
|
|
(key = rcu_dereference(tx->sdata->default_multicast_key)))
|
|
|
|
tx->key = key;
|
2009-01-08 18:32:02 +07:00
|
|
|
else if (ieee80211_is_mgmt(hdr->frame_control) &&
|
2010-03-30 13:35:23 +07:00
|
|
|
is_multicast_ether_addr(hdr->addr1) &&
|
2014-01-23 22:20:29 +07:00
|
|
|
ieee80211_is_robust_mgmt_frame(tx->skb) &&
|
2009-01-08 18:32:02 +07:00
|
|
|
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
|
|
|
|
tx->key = key;
|
2010-12-10 01:49:02 +07:00
|
|
|
else if (is_multicast_ether_addr(hdr->addr1) &&
|
|
|
|
(key = rcu_dereference(tx->sdata->default_multicast_key)))
|
|
|
|
tx->key = key;
|
|
|
|
else if (!is_multicast_ether_addr(hdr->addr1) &&
|
|
|
|
(key = rcu_dereference(tx->sdata->default_unicast_key)))
|
[MAC80211]: fix race conditions with keys
During receive processing, we select the key long before using it and
because there's no locking it is possible that we kfree() the key
after having selected it but before using it for crypto operations.
Obviously, this is bad.
Secondly, during transmit processing, there are two possible races: We
have a similar race between select_key() and using it for encryption,
but we also have a race here between select_key() and hardware
encryption (both when a key is removed.)
This patch solves these issues by using RCU: when a key is to be freed,
we first remove the pointer from the appropriate places (sdata->keys,
sdata->default_key, sta->key) using rcu_assign_pointer() and then
synchronize_rcu(). Then, we can safely kfree() the key and remove it
from the hardware. There's a window here where the hardware may still
be using it for decryption, but we can't work around that without having
two hardware callbacks, one to disable the key for RX and one to disable
it for TX; but the worst thing that will happen is that we receive a
packet decrypted that we don't find a key for any more and then drop it.
When we add a key, we first need to upload it to the hardware and then,
using rcu_assign_pointer() again, link it into our structures.
In the code using keys (TX/RX paths) we use rcu_dereference() to get the
key and enclose the whole tx/rx section in a rcu_read_lock() ...
rcu_read_unlock() block. Because we've uploaded the key to hardware
before linking it into internal structures, we can guarantee that it is
valid once get to into tx().
One possible race condition remains, however: when we have hardware
acceleration enabled and the driver shuts down the queues, we end up
queueing the frame. If now somebody removes the key, the key will be
removed from hwaccel and then then driver will be asked to encrypt the
frame with a key index that has been removed. Hence, drivers will need
to be aware that the hw_key_index they are passed might not be under
all circumstances. Most drivers will, however, simply ignore that
condition and encrypt the frame with the selected key anyway, this
only results in a frame being encrypted with a wrong key or dropped
(rightfully) because the key was not valid. There isn't much we can
do about it unless we want to walk the pending frame queue every time
a key is removed and remove all frames that used it.
This race condition, however, will most likely be solved once we add
multiqueue support to mac80211 because then frames will be queued
further up the stack instead of after being processed.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Acked-by: Michael Wu <flamingice@sourmilk.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-09-14 22:10:24 +07:00
|
|
|
tx->key = key;
|
2015-03-20 17:37:36 +07:00
|
|
|
else
|
2012-07-04 23:10:08 +07:00
|
|
|
tx->key = NULL;
|
2007-07-27 20:43:22 +07:00
|
|
|
|
|
|
|
if (tx->key) {
|
2010-01-17 07:47:58 +07:00
|
|
|
bool skip_hw = false;
|
|
|
|
|
2007-09-17 12:29:25 +07:00
|
|
|
/* TODO: add threshold stuff again */
|
2007-12-18 21:27:47 +07:00
|
|
|
|
2010-08-10 14:46:38 +07:00
|
|
|
switch (tx->key->conf.cipher) {
|
|
|
|
case WLAN_CIPHER_SUITE_WEP40:
|
|
|
|
case WLAN_CIPHER_SUITE_WEP104:
|
|
|
|
case WLAN_CIPHER_SUITE_TKIP:
|
2008-07-16 08:44:13 +07:00
|
|
|
if (!ieee80211_is_data_present(hdr->frame_control))
|
2007-12-18 21:27:47 +07:00
|
|
|
tx->key = NULL;
|
|
|
|
break;
|
2010-08-10 14:46:38 +07:00
|
|
|
case WLAN_CIPHER_SUITE_CCMP:
|
2015-01-25 00:52:07 +07:00
|
|
|
case WLAN_CIPHER_SUITE_CCMP_256:
|
2015-01-25 00:52:06 +07:00
|
|
|
case WLAN_CIPHER_SUITE_GCMP:
|
|
|
|
case WLAN_CIPHER_SUITE_GCMP_256:
|
2009-01-08 18:32:00 +07:00
|
|
|
if (!ieee80211_is_data_present(hdr->frame_control) &&
|
|
|
|
!ieee80211_use_mfp(hdr->frame_control, tx->sta,
|
2016-06-22 17:55:20 +07:00
|
|
|
tx->skb) &&
|
|
|
|
!ieee80211_is_group_privacy_action(tx->skb))
|
2009-01-08 18:32:00 +07:00
|
|
|
tx->key = NULL;
|
2010-01-24 01:27:14 +07:00
|
|
|
else
|
|
|
|
skip_hw = (tx->key->conf.flags &
|
2012-09-04 22:08:23 +07:00
|
|
|
IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
|
2010-01-24 01:27:14 +07:00
|
|
|
ieee80211_is_mgmt(hdr->frame_control);
|
2009-01-08 18:32:00 +07:00
|
|
|
break;
|
2010-08-10 14:46:38 +07:00
|
|
|
case WLAN_CIPHER_SUITE_AES_CMAC:
|
2015-01-25 00:52:08 +07:00
|
|
|
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
|
2015-01-25 00:52:09 +07:00
|
|
|
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
|
|
|
|
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
|
2009-01-08 18:32:02 +07:00
|
|
|
if (!ieee80211_is_mgmt(hdr->frame_control))
|
|
|
|
tx->key = NULL;
|
|
|
|
break;
|
2007-12-18 21:27:47 +07:00
|
|
|
}
|
2010-01-17 07:47:58 +07:00
|
|
|
|
2013-01-29 17:41:38 +07:00
|
|
|
if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
|
|
|
|
!ieee80211_is_deauth(hdr->frame_control)))
|
2011-07-12 17:30:59 +07:00
|
|
|
return TX_DROP;
|
|
|
|
|
2010-01-23 04:07:59 +07:00
|
|
|
if (!skip_hw && tx->key &&
|
2010-01-25 17:36:16 +07:00
|
|
|
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
|
2010-01-17 07:47:58 +07:00
|
|
|
info->control.hw_key = &tx->key->conf;
|
2020-03-26 20:09:42 +07:00
|
|
|
} else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
|
|
|
|
test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
|
|
|
|
return TX_DROP;
|
2007-07-27 20:43:22 +07:00
|
|
|
}
|
|
|
|
|
2008-02-01 01:48:20 +07:00
|
|
|
return TX_CONTINUE;
|
2007-07-27 20:43:22 +07:00
|
|
|
}
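
/*
 * A short worked example of the precedence above (illustrative, not in
 * the original file): a unicast data frame to a station with an
 * installed pairwise key always uses that PTK; a multicast data frame
 * falls through to the default multicast key; and a unicast data frame
 * without a PTK (e.g. a static-key/WEP setup) ends up with the default
 * unicast key. Only when no branch matches does the frame go out with
 * tx->key == NULL.
 */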

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
	struct ieee80211_supported_band *sband;
	u32 len;
	struct ieee80211_tx_rate_control txrc;
	struct ieee80211_sta_rates *ratetbl = NULL;
	bool assoc = false;

	memset(&txrc, 0, sizeof(txrc));

	sband = tx->local->hw.wiphy->bands[info->band];

	len = min_t(u32, tx->skb->len + FCS_LEN,
		    tx->local->hw.wiphy->frag_threshold);

	/* set up the tx rate control struct we give the RC algo */
	txrc.hw = &tx->local->hw;
	txrc.sband = sband;
	txrc.bss_conf = &tx->sdata->vif.bss_conf;
	txrc.skb = tx->skb;
	txrc.reported_rate.idx = -1;
	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];

	if (tx->sdata->rc_has_mcs_mask[info->band])
		txrc.rate_idx_mcs_mask =
			tx->sdata->rc_rateidx_mcs_mask[info->band];

	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
		    tx->sdata->vif.type == NL80211_IFTYPE_OCB);

	/* set up RTS protection if desired */
	if (len > tx->local->hw.wiphy->rts_threshold) {
		txrc.rts = true;
	}

	info->control.use_rts = txrc.rts;
	info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot;

	/*
	 * Use short preamble if the BSS can handle it, but not for
	 * management frames unless we know the receiver can handle
	 * that -- the management frame might be to a station that
	 * just wants a probe response.
	 */
	if (tx->sdata->vif.bss_conf.use_short_preamble &&
	    (ieee80211_is_data(hdr->frame_control) ||
	     (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
		txrc.short_preamble = true;

	info->control.short_preamble = txrc.short_preamble;

	/* don't ask rate control when rate already injected via radiotap */
	if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
		return TX_CONTINUE;

	if (tx->sta)
		assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);

	/*
	 * Lets not bother rate control if we're associated and cannot
	 * talk to the sta. This should not happen.
	 */
	if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
		 !rate_usable_index_exists(sband, &tx->sta->sta),
		 "%s: Dropped data frame as no usable bitrate found while "
		 "scanning and associated. Target station: "
		 "%pM on %d GHz band\n",
		 tx->sdata->name, hdr->addr1,
		 info->band ? 5 : 2))
		return TX_DROP;

	/*
	 * If we're associated with the sta at this point we know we can at
	 * least send the frame at the lowest bit rate.
	 */
	rate_control_get_rate(tx->sdata, tx->sta, &txrc);

	if (tx->sta && !info->control.skip_table)
		ratetbl = rcu_dereference(tx->sta->sta.rates);

	if (unlikely(info->control.rates[0].idx < 0)) {
		if (ratetbl) {
			struct ieee80211_tx_rate rate = {
				.idx = ratetbl->rate[0].idx,
				.flags = ratetbl->rate[0].flags,
				.count = ratetbl->rate[0].count
			};

			if (ratetbl->rate[0].idx < 0)
				return TX_DROP;

			tx->rate = rate;
		} else {
			return TX_DROP;
		}
	} else {
		tx->rate = info->control.rates[0];
	}

	if (txrc.reported_rate.idx < 0) {
		txrc.reported_rate = tx->rate;
		if (tx->sta && ieee80211_is_data(hdr->frame_control))
			tx->sta->tx_stats.last_rate = txrc.reported_rate;
	} else if (tx->sta)
		tx->sta->tx_stats.last_rate = txrc.reported_rate;

	if (ratetbl)
		return TX_CONTINUE;

	if (unlikely(!info->control.rates[0].count))
		info->control.rates[0].count = 1;

	if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
			 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
		info->control.rates[0].count = 1;

	return TX_CONTINUE;
}
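
/*
 * Example of the length/RTS interaction above, with hypothetical
 * thresholds: a 1500-byte skb plus FCS_LEN (4) gives 1504 bytes, but a
 * frag_threshold of 1024 caps len at 1024; RTS is then requested only
 * because this capped length still exceeds an rts_threshold of, say,
 * 500 bytes.
 */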

static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
{
	u16 *seq = &sta->tid_seq[tid];
	__le16 ret = cpu_to_le16(*seq);

	/* Increase the sequence number. */
	*seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;

	return ret;
}
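
/*
 * Example of the arithmetic above: the sequence control field keeps the
 * fragment number in its low four bits, so adding 0x10 advances the
 * 12-bit sequence number by one. With IEEE80211_SCTL_SEQ == 0xfff0 the
 * counter wraps cleanly: (0xfff0 + 0x10) & 0xfff0 == 0x0000.
 */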

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	int tid;

	/*
	 * Packet injection may want to control the sequence
	 * number, if we have no matching interface then we
	 * neither assign one ourselves nor ask the driver to.
	 */
	if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
		return TX_CONTINUE;

	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
		return TX_CONTINUE;

	if (ieee80211_hdrlen(hdr->frame_control) < 24)
		return TX_CONTINUE;

	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
		return TX_CONTINUE;

	if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO)
		return TX_CONTINUE;

	/*
	 * Anything but QoS data that has a sequence number field
	 * (is long enough) gets a sequence number from the global
	 * counter. QoS data frames with a multicast destination
	 * also use the global counter (802.11-2012 9.3.2.10).
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		/* driver should assign sequence number */
		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
		/* for pure STA mode without beacons, we can do it */
		hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
		tx->sdata->sequence_number += 0x10;
		if (tx->sta)
			tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
		return TX_CONTINUE;
	}

	/*
	 * This should be true for injected/management frames only, for
	 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
	 * above since they are not QoS-data frames.
	 */
	if (!tx->sta)
		return TX_CONTINUE;

	/* include per-STA, per-TID sequence counter */
	tid = ieee80211_get_tid(hdr);
	tx->sta->tx_stats.msdu[tid]++;

	hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);

	return TX_CONTINUE;
}
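
/*
 * To illustrate the two counters above: a beacon or a multicast QoS
 * data frame takes the global tx->sdata->sequence_number path, while a
 * unicast QoS data frame on TID 5 consumes sta->tid_seq[5] via
 * ieee80211_tx_next_seq(), keeping per-TID ordering independent.
 */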

static int ieee80211_fragment(struct ieee80211_tx_data *tx,
			      struct sk_buff *skb, int hdrlen,
			      int frag_threshold)
{
	struct ieee80211_local *local = tx->local;
	struct ieee80211_tx_info *info;
	struct sk_buff *tmp;
	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
	int pos = hdrlen + per_fragm;
	int rem = skb->len - hdrlen - per_fragm;

	if (WARN_ON(rem < 0))
		return -EINVAL;

	/* first fragment was already added to queue by caller */

	while (rem) {
		int fraglen = per_fragm;

		if (fraglen > rem)
			fraglen = rem;
		rem -= fraglen;
		tmp = dev_alloc_skb(local->tx_headroom +
				    frag_threshold +
				    tx->sdata->encrypt_headroom +
				    IEEE80211_ENCRYPT_TAILROOM);
		if (!tmp)
			return -ENOMEM;

		__skb_queue_tail(&tx->skbs, tmp);

		skb_reserve(tmp,
			    local->tx_headroom + tx->sdata->encrypt_headroom);

		/* copy control information */
		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));

		info = IEEE80211_SKB_CB(tmp);
		info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
				 IEEE80211_TX_CTL_FIRST_FRAGMENT);

		if (rem)
			info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;

		skb_copy_queue_mapping(tmp, skb);
		tmp->priority = skb->priority;
		tmp->dev = skb->dev;

		/* copy header and data */
		skb_put_data(tmp, skb->data, hdrlen);
		skb_put_data(tmp, skb->data + pos, fraglen);

		pos += fraglen;
	}

	/* adjust first fragment's length */
	skb_trim(skb, hdrlen + per_fragm);
	return 0;
}
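
/*
 * Fragment size arithmetic, as a concrete example with hypothetical
 * values: a frag_threshold of 256 and a 24-byte header give
 * per_fragm = 256 - 24 - 4 (FCS_LEN) = 228 payload bytes. A 1024-byte
 * skb then leaves rem = 1024 - 24 - 228 = 772 bytes, producing
 * follow-up fragments of 228, 228, 228 and 88 bytes after the trimmed
 * first fragment.
 */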

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int frag_threshold = tx->local->hw.wiphy->frag_threshold;
	int hdrlen;
	int fragnum;

	/* no matter what happens, tx->skb moves to tx->skbs */
	__skb_queue_tail(&tx->skbs, skb);
	tx->skb = NULL;

	if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
		return TX_CONTINUE;

	if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG))
		return TX_CONTINUE;

	/*
	 * Warn when submitting a fragmented A-MPDU frame and drop it.
	 * This scenario is handled in ieee80211_tx_prepare but extra
	 * caution taken here as fragmented ampdu may cause Tx stop.
	 */
	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
		return TX_DROP;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* internal error, why isn't DONTFRAG set? */
	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
		return TX_DROP;

	/*
	 * Now fragment the frame. This will allocate all the fragments and
	 * chain them (using skb as the first fragment) to skb->next.
	 * During transmission, we will remove the successfully transmitted
	 * fragments from this list. When the low-level driver rejects one
	 * of the fragments then we will simply pretend to accept the skb
	 * but store it away as pending.
	 */
	if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
		return TX_DROP;

	/* update duration/seq/flags of fragments */
	fragnum = 0;

	skb_queue_walk(&tx->skbs, skb) {
		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);

		hdr = (void *)skb->data;
		info = IEEE80211_SKB_CB(skb);

		if (!skb_queue_is_last(&tx->skbs, skb)) {
			hdr->frame_control |= morefrags;
			/*
			 * No multi-rate retries for fragmented frames, that
			 * would completely throw off the NAV at other STAs.
			 */
			info->control.rates[1].idx = -1;
			info->control.rates[2].idx = -1;
			info->control.rates[3].idx = -1;
			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
		} else {
			hdr->frame_control &= ~morefrags;
		}
		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
		fragnum++;
	}

	return TX_CONTINUE;
}
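
/*
 * Note on the seq_ctrl update above: IEEE80211_SCTL_FRAG masks the low
 * four bits of the sequence control field, so fragment numbers 0..15
 * are expressible and a frame can be split into at most 16 fragments.
 */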

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb;
	int ac = -1;

	if (!tx->sta)
		return TX_CONTINUE;

	skb_queue_walk(&tx->skbs, skb) {
		ac = skb_get_queue_mapping(skb);
		tx->sta->tx_stats.bytes[ac] += skb->len;
	}
	if (ac >= 0)
		tx->sta->tx_stats.packets[ac]++;

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
{
	if (!tx->key)
		return TX_CONTINUE;

	switch (tx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return ieee80211_crypto_wep_encrypt(tx);
	case WLAN_CIPHER_SUITE_TKIP:
		return ieee80211_crypto_tkip_encrypt(tx);
	case WLAN_CIPHER_SUITE_CCMP:
		return ieee80211_crypto_ccmp_encrypt(
			tx, IEEE80211_CCMP_MIC_LEN);
	case WLAN_CIPHER_SUITE_CCMP_256:
		return ieee80211_crypto_ccmp_encrypt(
			tx, IEEE80211_CCMP_256_MIC_LEN);
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return ieee80211_crypto_aes_cmac_encrypt(tx);
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		return ieee80211_crypto_aes_cmac_256_encrypt(tx);
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		return ieee80211_crypto_aes_gmac_encrypt(tx);
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		return ieee80211_crypto_gcmp_encrypt(tx);
	default:
		return ieee80211_crypto_hw_encrypt(tx);
	}

	return TX_DROP;
}
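
/*
 * Example of the cipher dispatch above: CCMP and CCMP-256 share one
 * implementation and differ only in the MIC length passed in
 * (IEEE80211_CCMP_MIC_LEN is 8 bytes, IEEE80211_CCMP_256_MIC_LEN is
 * 16), while unknown ciphers fall through to the driver via
 * ieee80211_crypto_hw_encrypt().
 */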

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	int next_len;
	bool group_addr;

	skb_queue_walk(&tx->skbs, skb) {
		hdr = (void *) skb->data;
		if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
			break; /* must not overwrite AID */
		if (!skb_queue_is_last(&tx->skbs, skb)) {
			struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
			next_len = next->len;
		} else
			next_len = 0;
		group_addr = is_multicast_ether_addr(hdr->addr1);

		hdr->duration_id =
			ieee80211_duration(tx, skb, group_addr, next_len);
	}

	return TX_CONTINUE;
}

/* actual transmit path */

static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
				  struct sk_buff *skb,
				  struct ieee80211_tx_info *info,
				  struct tid_ampdu_tx *tid_tx,
				  int tid)
{
	bool queued = false;
	bool reset_agg_timer = false;
	struct sk_buff *purge_skb = NULL;

	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
		info->flags |= IEEE80211_TX_CTL_AMPDU;
		reset_agg_timer = true;
	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/*
		 * nothing -- this aggregation session is being started
		 * but that might still fail with the driver
		 */
	} else if (!tx->sta->sta.txq[tid]) {
		spin_lock(&tx->sta->lock);
		/*
		 * Need to re-check now, because we may get here
		 *
		 * 1) in the window during which the setup is actually
		 *    already done, but not marked yet because not all
		 *    packets are spliced over to the driver pending
		 *    queue yet -- if this happened we acquire the lock
		 *    either before or after the splice happens, but
		 *    need to recheck which of these cases happened.
		 *
		 * 2) during session teardown, if the OPERATIONAL bit
		 *    was cleared due to the teardown but the pointer
		 *    hasn't been assigned NULL yet (or we loaded it
		 *    before it was assigned) -- in this case it may
		 *    now be NULL which means we should just let the
		 *    packet pass through because splicing the frames
		 *    back is already done.
		 */
		tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);

		if (!tid_tx) {
			/* do nothing, let packet pass through */
		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
			info->flags |= IEEE80211_TX_CTL_AMPDU;
			reset_agg_timer = true;
		} else {
			queued = true;
			if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
				clear_sta_flag(tx->sta, WLAN_STA_SP);
				ps_dbg(tx->sta->sdata,
				       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
				       tx->sta->sta.addr, tx->sta->sta.aid);
			}
			info->control.vif = &tx->sdata->vif;
			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
			info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
			__skb_queue_tail(&tid_tx->pending, skb);
			if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
				purge_skb = __skb_dequeue(&tid_tx->pending);
		}
		spin_unlock(&tx->sta->lock);

		if (purge_skb)
			ieee80211_free_txskb(&tx->local->hw, purge_skb);
	}

	/* reset session timer */
	if (reset_agg_timer)
		tid_tx->last_tx = jiffies;

	return queued;
}

/*
 * initialises @tx
 * pass %NULL for the station if unknown, a valid pointer if known
 * or an ERR_PTR() if the station is known not to exist
 */
static ieee80211_tx_result
ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
		     struct ieee80211_tx_data *tx,
		     struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int tid;

	memset(tx, 0, sizeof(*tx));
	tx->skb = skb;
	tx->local = local;
	tx->sdata = sdata;
	__skb_queue_head_init(&tx->skbs);

	/*
	 * If this flag is set to true anywhere, and we get here,
	 * we are doing the needed processing, so remove the flag
	 * now.
	 */
	info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;

	hdr = (struct ieee80211_hdr *) skb->data;

	if (likely(sta)) {
		if (!IS_ERR(sta))
			tx->sta = sta;
	} else {
		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
			tx->sta = rcu_dereference(sdata->u.vlan.sta);
			if (!tx->sta && sdata->wdev.use_4addr)
				return TX_DROP;
		} else if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
					  IEEE80211_TX_CTL_INJECTED) ||
			   tx->sdata->control_port_protocol == tx->skb->protocol) {
			tx->sta = sta_info_get_bss(sdata, hdr->addr1);
		}
		if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
			tx->sta = sta_info_get(sdata, hdr->addr1);
	}

	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
	    ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
	    !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
		struct tid_ampdu_tx *tid_tx;

		tid = ieee80211_get_tid(hdr);

		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
		if (tid_tx) {
			bool queued;

			queued = ieee80211_tx_prep_agg(tx, skb, info,
						       tid_tx, tid);

			if (unlikely(queued))
				return TX_QUEUED;
		}
	}

	if (is_multicast_ether_addr(hdr->addr1)) {
		tx->flags &= ~IEEE80211_TX_UNICAST;
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	} else
		tx->flags |= IEEE80211_TX_UNICAST;

	if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
		if (!(tx->flags & IEEE80211_TX_UNICAST) ||
		    skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
		    info->flags & IEEE80211_TX_CTL_AMPDU)
			info->flags |= IEEE80211_TX_CTL_DONTFRAG;
	}

	if (!tx->sta)
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		ieee80211_check_fast_xmit(tx->sta);
	}

	info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;

	return TX_CONTINUE;
}
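
/*
 * To make the DONTFRAG decision above concrete: multicast frames,
 * frames whose length (including FCS) already fits within
 * wiphy->frag_threshold, and frames marked for A-MPDU aggregation all
 * get IEEE80211_TX_CTL_DONTFRAG set here, so the fragmentation handler
 * later passes them through untouched.
 */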

static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
					  struct ieee80211_vif *vif,
					  struct sta_info *sta,
					  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_txq *txq = NULL;

	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
		return NULL;

	if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
	    unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
		if ((!ieee80211_is_mgmt(hdr->frame_control) ||
		     ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
		     vif->type == NL80211_IFTYPE_STATION) &&
		    sta && sta->uploaded) {
			/*
			 * This will be NULL if the driver didn't set the
			 * opt-in hardware flag.
			 */
			txq = sta->sta.txq[IEEE80211_NUM_TIDS];
		}
	} else if (sta) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (!sta->uploaded)
			return NULL;

		txq = sta->sta.txq[tid];
	} else if (vif) {
		txq = vif->txq;
	}

	if (!txq)
		return NULL;

	return to_txq_info(txq);
}
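
/*
 * Mapping example for the above: a QoS data frame with skb->priority 5
 * lands on sta->sta.txq[5], whereas bufferable management frames use
 * the extra per-station queue at index IEEE80211_NUM_TIDS (16) when
 * the driver opted in; frames without a known station fall back to the
 * per-vif txq.
 */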

static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
{
	IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
}

static u32 codel_skb_len_func(const struct sk_buff *skb)
{
	return skb->len;
}

static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
{
	const struct ieee80211_tx_info *info;

	info = (const struct ieee80211_tx_info *)skb->cb;
	return info->control.enqueue_time;
}

static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
					  void *ctx)
{
	struct ieee80211_local *local;
	struct txq_info *txqi;
	struct fq *fq;
	struct fq_flow *flow;

	txqi = ctx;
	local = vif_to_sdata(txqi->txq.vif)->local;
	fq = &local->fq;

	if (cvars == &txqi->def_cvars)
		flow = &txqi->def_flow;
	else
		flow = &fq->flows[cvars - local->cvars];

	return fq_flow_dequeue(fq, flow);
}

static void codel_drop_func(struct sk_buff *skb,
			    void *ctx)
{
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	struct txq_info *txqi;

	txqi = ctx;
	local = vif_to_sdata(txqi->txq.vif)->local;
	hw = &local->hw;

	ieee80211_free_txskb(hw, skb);
}
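
/*
 * The callbacks above feed the CoDel algorithm: a packet's sojourn time
 * is codel_get_time() at dequeue minus the control.enqueue_time stamped
 * at enqueue. With the parameters set in ieee80211_txq_setup_flows()
 * below (target 20 ms, interval 100 ms), CoDel starts dropping from the
 * head of a flow once the sojourn time stays above the target for at
 * least one interval.
 */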

static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
					   struct fq_tin *tin,
					   struct fq_flow *flow)
{
	struct ieee80211_local *local;
	struct txq_info *txqi;
	struct codel_vars *cvars;
	struct codel_params *cparams;
	struct codel_stats *cstats;

	local = container_of(fq, struct ieee80211_local, fq);
	txqi = container_of(tin, struct txq_info, tin);
	cstats = &txqi->cstats;

	if (txqi->txq.sta) {
		struct sta_info *sta = container_of(txqi->txq.sta,
						    struct sta_info, sta);
		cparams = &sta->cparams;
	} else {
		cparams = &local->cparams;
	}

	if (flow == &txqi->def_flow)
		cvars = &txqi->def_cvars;
	else
		cvars = &local->cvars[flow - fq->flows];

	return codel_dequeue(txqi,
			     &flow->backlog,
			     cparams,
			     cvars,
			     cstats,
			     codel_skb_len_func,
			     codel_skb_time_func,
			     codel_drop_func,
			     codel_dequeue_func);
}

static void fq_skb_free_func(struct fq *fq,
			     struct fq_tin *tin,
			     struct fq_flow *flow,
			     struct sk_buff *skb)
{
	struct ieee80211_local *local;

	local = container_of(fq, struct ieee80211_local, fq);
	ieee80211_free_txskb(&local->hw, skb);
}

static struct fq_flow *fq_flow_get_default_func(struct fq *fq,
						struct fq_tin *tin,
						int idx,
						struct sk_buff *skb)
{
	struct txq_info *txqi;

	txqi = container_of(tin, struct txq_info, tin);
	return &txqi->def_flow;
}

static void ieee80211_txq_enqueue(struct ieee80211_local *local,
				  struct txq_info *txqi,
				  struct sk_buff *skb)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;
	u32 flow_idx = fq_flow_idx(fq, skb);

	ieee80211_set_skb_enqueue_time(skb);

	spin_lock_bh(&fq->lock);
	fq_tin_enqueue(fq, tin, flow_idx, skb,
		       fq_skb_free_func,
		       fq_flow_get_default_func);
	spin_unlock_bh(&fq->lock);
}

static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin,
				struct fq_flow *flow, struct sk_buff *skb,
				void *data)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	return info->control.vif == data;
}

void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata)
{
	struct fq *fq = &local->fq;
	struct txq_info *txqi;
	struct fq_tin *tin;
	struct ieee80211_sub_if_data *ap;

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
		return;

	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);

	if (!ap->vif.txq)
		return;

	txqi = to_txq_info(ap->vif.txq);
	tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
		      fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}

void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
			struct sta_info *sta,
			struct txq_info *txqi, int tid)
{
	fq_tin_init(&txqi->tin);
	fq_flow_init(&txqi->def_flow);
	codel_vars_init(&txqi->def_cvars);
	codel_stats_init(&txqi->cstats);
	__skb_queue_head_init(&txqi->frags);
	INIT_LIST_HEAD(&txqi->schedule_order);

	txqi->txq.vif = &sdata->vif;

	if (!sta) {
		sdata->vif.txq = &txqi->txq;
		txqi->txq.tid = 0;
		txqi->txq.ac = IEEE80211_AC_BE;

		return;
	}

	if (tid == IEEE80211_NUM_TIDS) {
		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			/* Drivers need to opt in to the management MPDU TXQ */
			if (!ieee80211_hw_check(&sdata->local->hw,
						STA_MMPDU_TXQ))
				return;
		} else if (!ieee80211_hw_check(&sdata->local->hw,
					       BUFF_MMPDU_TXQ)) {
			/* Drivers need to opt in to the bufferable MMPDU TXQ */
			return;
		}
		txqi->txq.ac = IEEE80211_AC_VO;
	} else {
		txqi->txq.ac = ieee80211_ac_from_tid(tid);
	}

	txqi->txq.sta = &sta->sta;
	txqi->txq.tid = tid;
	sta->sta.txq[tid] = &txqi->txq;
}
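
/*
 * Example of the AC assignment above: ieee80211_ac_from_tid() follows
 * the 802.1D mapping, so TIDs 0 and 3 select best effort, 1 and 2
 * background, 4 and 5 video, and 6 and 7 voice; the special
 * IEEE80211_NUM_TIDS entry for management frames is pinned to VO.
 */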

void ieee80211_txq_purge(struct ieee80211_local *local,
			 struct txq_info *txqi)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_reset(fq, tin, fq_skb_free_func);
	ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
	spin_unlock_bh(&fq->lock);

	spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
	list_del_init(&txqi->schedule_order);
	spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}

void ieee80211_txq_set_params(struct ieee80211_local *local)
{
	if (local->hw.wiphy->txq_limit)
		local->fq.limit = local->hw.wiphy->txq_limit;
	else
		local->hw.wiphy->txq_limit = local->fq.limit;

	if (local->hw.wiphy->txq_memory_limit)
		local->fq.memory_limit = local->hw.wiphy->txq_memory_limit;
	else
		local->hw.wiphy->txq_memory_limit = local->fq.memory_limit;

	if (local->hw.wiphy->txq_quantum)
		local->fq.quantum = local->hw.wiphy->txq_quantum;
	else
		local->hw.wiphy->txq_quantum = local->fq.quantum;
}

int ieee80211_txq_setup_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;
	int ret;
	int i;
	bool supp_vht = false;
	enum nl80211_band band;

	if (!local->ops->wake_tx_queue)
		return 0;

	ret = fq_init(fq, 4096);
	if (ret)
		return ret;

	/*
	 * If the hardware doesn't support VHT, it is safe to limit the maximum
	 * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
	 */
	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (!sband)
			continue;

		supp_vht = supp_vht || sband->vht_cap.vht_supported;
	}

	if (!supp_vht)
		fq->memory_limit = 4 << 20; /* 4 Mbytes */

	codel_params_init(&local->cparams);
	local->cparams.interval = MS2TIME(100);
	local->cparams.target = MS2TIME(20);
	local->cparams.ecn = true;

	local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
			       GFP_KERNEL);
	if (!local->cvars) {
		spin_lock_bh(&fq->lock);
		fq_reset(fq, fq_skb_free_func);
		spin_unlock_bh(&fq->lock);
		return -ENOMEM;
	}

	for (i = 0; i < fq->flows_cnt; i++)
		codel_vars_init(&local->cvars[i]);

	ieee80211_txq_set_params(local);

	return 0;
}
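
/*
 * The 4 << 20 limit above is easy to sanity-check: 4,194,304 bytes is
 * just over 64 maximum-size 802.11n A-MPDUs (64 * 65,535 = 4,194,240),
 * matching the "64 max-size aggregates" comment.
 */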

void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;

	if (!local->ops->wake_tx_queue)
		return;

	kfree(local->cvars);
	local->cvars = NULL;

	spin_lock_bh(&fq->lock);
	fq_reset(fq, fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}

static bool ieee80211_queue_skb(struct ieee80211_local *local,
				struct ieee80211_sub_if_data *sdata,
				struct sta_info *sta,
				struct sk_buff *skb)
{
	struct ieee80211_vif *vif;
	struct txq_info *txqi;

	if (!local->ops->wake_tx_queue ||
	    sdata->vif.type == NL80211_IFTYPE_MONITOR)
		return false;

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	vif = &sdata->vif;
	txqi = ieee80211_get_txq(local, vif, sta, skb);

	if (!txqi)
		return false;

	ieee80211_txq_enqueue(local, txqi, skb);

	schedule_and_wake_txq(local, txqi);

	return true;
}
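
/*
 * Usage note for the above: a true return means the skb now sits on an
 * intermediate TXQ and will be pulled by the driver through its
 * wake_tx_queue() op; on false the caller must transmit the frame
 * directly down the legacy path.
 */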

static bool ieee80211_tx_frags(struct ieee80211_local *local,
			       struct ieee80211_vif *vif,
			       struct sta_info *sta,
			       struct sk_buff_head *skbs,
			       bool txpending)
{
	struct ieee80211_tx_control control = {};
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	skb_queue_walk_safe(skbs, skb, tmp) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int q = info->hw_queue;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		if (WARN_ON_ONCE(q >= local->hw.queues)) {
			__skb_unlink(skb, skbs);
			ieee80211_free_txskb(&local->hw, skb);
			continue;
		}
#endif

		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		if (local->queue_stop_reasons[q] ||
		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
			if (unlikely(info->flags &
				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
				if (local->queue_stop_reasons[q] &
				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
					/*
					 * Drop off-channel frames if queues
					 * are stopped for any reason other
					 * than off-channel operation. Never
					 * queue them.
					 */
					spin_unlock_irqrestore(
						&local->queue_stop_reason_lock,
						flags);
					ieee80211_purge_tx_queue(&local->hw,
								 skbs);
					return true;
				}
			} else {

				/*
				 * Since queue is stopped, queue up frames for
				 * later transmission from the tx-pending
				 * tasklet when the queue is woken again.
				 */
				if (txpending)
					skb_queue_splice_init(skbs,
							      &local->pending[q]);
				else
					skb_queue_splice_tail_init(skbs,
								   &local->pending[q]);

				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
						       flags);
				return false;
			}
		}
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		info->control.vif = vif;
		control.sta = sta ? &sta->sta : NULL;

		__skb_unlink(skb, skbs);
		drv_tx(local, &control, skb);
	}

	return true;
}

/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool __ieee80211_tx(struct ieee80211_local *local,
			   struct sk_buff_head *skbs, int led_len,
			   struct sta_info *sta, bool txpending)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_vif *vif;
	struct sk_buff *skb;
	bool result = true;
	__le16 fc;

	if (WARN_ON(skb_queue_empty(skbs)))
		return true;

	skb = skb_peek(skbs);
	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
	info = IEEE80211_SKB_CB(skb);
	sdata = vif_to_sdata(info->control.vif);
	if (sta && !sta->uploaded)
		sta = NULL;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_MONITOR:
		if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
			vif = &sdata->vif;
			break;
		}
		sdata = rcu_dereference(local->monitor_sdata);
		if (sdata) {
			vif = &sdata->vif;
			info->hw_queue =
				vif->hw_queue[skb_get_queue_mapping(skb)];
		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
			ieee80211_purge_tx_queue(&local->hw, skbs);
			return true;
		} else
			vif = NULL;
		break;
	case NL80211_IFTYPE_AP_VLAN:
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);
		fallthrough;
	default:
		vif = &sdata->vif;
		break;
	}

	result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);

	ieee80211_tpt_led_trig_tx(local, fc, led_len);

	WARN_ON_ONCE(!skb_queue_empty(skbs));

	return result;
}

/*
 * Invoke TX handlers, return 0 on success and non-zero if the
 * frame was dropped or queued.
 *
 * The handlers are split into an early and late part. The latter is everything
 * that can be sensitive to reordering, and will be deferred to after packets
 * are dequeued from the intermediate queues (when they are enabled).
 */
static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
{
	ieee80211_tx_result res = TX_DROP;

#define CALL_TXH(txh) \
	do {				\
		res = txh(tx);		\
		if (res != TX_CONTINUE)	\
			goto txh_done;	\
	} while (0)

	CALL_TXH(ieee80211_tx_h_dynamic_ps);
	CALL_TXH(ieee80211_tx_h_check_assoc);
	CALL_TXH(ieee80211_tx_h_ps_buf);
	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
	CALL_TXH(ieee80211_tx_h_select_key);
	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_rate_ctrl);

 txh_done:
	if (unlikely(res == TX_DROP)) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop);
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}

/*
 * Late handlers can be called while the sta lock is held. Handlers that can
 * cause packets to be generated will cause deadlock!
 */
static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	ieee80211_tx_result res = TX_CONTINUE;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
		__skb_queue_tail(&tx->skbs, tx->skb);
		tx->skb = NULL;
		goto txh_done;
	}

	CALL_TXH(ieee80211_tx_h_michael_mic_add);
	CALL_TXH(ieee80211_tx_h_sequence);
	CALL_TXH(ieee80211_tx_h_fragment);
	/* handlers after fragment must be aware of tx info fragmentation! */
	CALL_TXH(ieee80211_tx_h_stats);
	CALL_TXH(ieee80211_tx_h_encrypt);
	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH

 txh_done:
	if (unlikely(res == TX_DROP)) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop);
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}

static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
	int r = invoke_tx_handlers_early(tx);

	if (r)
		return r;
	return invoke_tx_handlers_late(tx);
}
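
/*
 * A minimal, self-contained sketch of the CALL_TXH pattern used above:
 * run an ordered chain of handlers and stop at the first one that does
 * not ask to continue. All names below are hypothetical, not mac80211
 * symbols.
 */
enum example_txh_result {
	EXAMPLE_TX_CONTINUE,
	EXAMPLE_TX_DROP,
	EXAMPLE_TX_QUEUED,
};

typedef enum example_txh_result (*example_txh_fn)(void *ctx);

static int example_invoke_handlers(example_txh_fn *handlers, int n, void *ctx)
{
	int i;

	for (i = 0; i < n; i++) {
		enum example_txh_result res = handlers[i](ctx);

		if (res != EXAMPLE_TX_CONTINUE)
			return -1;	/* frame dropped or queued elsewhere */
	}

	return 0;	/* all handlers continued; keep transmitting */
}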

bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, struct sk_buff *skb,
			      int band, struct ieee80211_sta **sta)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_data tx;
	struct sk_buff *skb2;

	if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
		return false;

	info->band = band;
	info->control.vif = vif;
	info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers(&tx))
		return false;

	if (sta) {
		if (tx.sta)
			*sta = &tx.sta->sta;
		else
			*sta = NULL;
	}

	/* this function isn't suitable for fragmented data frames */
	skb2 = __skb_dequeue(&tx.skbs);
	if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
		ieee80211_free_txskb(hw, skb2);
		ieee80211_purge_tx_queue(hw, &tx.skbs);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
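
/*
 * A hedged usage sketch of ieee80211_tx_prepare_skb(): a driver that
 * needs a fully prepared frame (e.g. a template for offloaded TX)
 * could run it through the handlers like this. The driver context
 * (my_vif, frame, band) is hypothetical.
 */
static void example_prepare_template(struct ieee80211_hw *hw,
				     struct ieee80211_vif *my_vif,
				     struct sk_buff *frame, int band)
{
	struct ieee80211_sta *pubsta;

	if (!ieee80211_tx_prepare_skb(hw, my_vif, frame, band, &pubsta))
		return;	/* preparation failed; the frame was not prepared */

	/* frame now carries sequence number, key selection, rate info */
}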

/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
			 struct sta_info *sta, struct sk_buff *skb,
			 bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result res_prepare;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool result = true;
	int led_len;

	if (unlikely(skb->len < 10)) {
		dev_kfree_skb(skb);
		return true;
	}

	/* initialises tx */
	led_len = skb->len;
	res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);

	if (unlikely(res_prepare == TX_DROP)) {
		ieee80211_free_txskb(&local->hw, skb);
		return true;
	} else if (unlikely(res_prepare == TX_QUEUED)) {
		return true;
	}

	/* set up hw_queue value early */
	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
		info->hw_queue =
			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers_early(&tx))
		return true;

	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
		return true;

	if (!invoke_tx_handlers_late(&tx))
		result = __ieee80211_tx(local, &tx.skbs, led_len,
					tx.sta, txpending);

	return result;
}

/* device xmit handlers */

static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
				struct sk_buff *skb,
				int head_need, bool may_encrypt)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr *hdr;
	bool enc_tailroom;
	int tail_need = 0;

	hdr = (struct ieee80211_hdr *) skb->data;
	enc_tailroom = may_encrypt &&
		       (sdata->crypto_tx_tailroom_needed_cnt ||
			ieee80211_is_mgmt(hdr->frame_control));

	if (enc_tailroom) {
		tail_need = IEEE80211_ENCRYPT_TAILROOM;
		tail_need -= skb_tailroom(skb);
		tail_need = max_t(int, tail_need, 0);
	}

	if (skb_cloned(skb) &&
	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
	else if (head_need || tail_need)
		I802_DEBUG_INC(local->tx_expand_skb_head);
	else
		return 0;

	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
		wiphy_debug(local->hw.wiphy,
			    "failed to reallocate TX buffer\n");
		return -ENOMEM;
	}

	return 0;
}
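
/*
 * A minimal sketch of the headroom/tailroom arithmetic above, with
 * hypothetical values: the amount passed to pskb_expand_head() is the
 * shortfall between what is required and what the skb already has,
 * clamped at zero.
 */
static inline int example_room_shortfall(int required, int available)
{
	/* equivalent to: max_t(int, required - available, 0) */
	return required > available ? required - available : 0;
}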

void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
		    struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	int headroom;
	bool may_encrypt;

	may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);

	headroom = local->tx_headroom;
	if (may_encrypt)
		headroom += sdata->encrypt_headroom;
	headroom -= skb_headroom(skb);
	headroom = max_t(int, 0, headroom);

	if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
		ieee80211_free_txskb(&local->hw, skb);
		return;
	}

	hdr = (struct ieee80211_hdr *) skb->data;
	info->control.vif = &sdata->vif;

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		if (ieee80211_is_data(hdr->frame_control) &&
		    is_unicast_ether_addr(hdr->addr1)) {
			if (mesh_nexthop_resolve(sdata, skb))
				return; /* skb queued: don't free */
		} else {
			ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
		}
	}

	ieee80211_set_qos_hdr(sdata, skb);
	ieee80211_tx(sdata, sta, skb, false);
}

bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_radiotap_iterator iterator;
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_supported_band *sband =
		local->hw.wiphy->bands[info->band];
	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
						   NULL);
	u16 txflags;
	u16 rate = 0;
	bool rate_found = false;
	u8 rate_retries = 0;
	u16 rate_flags = 0;
	u8 mcs_known, mcs_flags, mcs_bw;
	u16 vht_known;
	u8 vht_mcs = 0, vht_nss = 0;
	int i;

	/* check for not even having the fixed radiotap header part */
	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
		return false; /* too short to be possibly valid */

	/* is it a header version we can trust to find length from? */
	if (unlikely(rthdr->it_version))
		return false; /* only version 0 is supported */

	/* does the skb contain enough to deliver on the alleged length? */
	if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
		return false; /* skb too short for claimed rt header extent */

	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
		       IEEE80211_TX_CTL_DONTFRAG;

	/*
	 * for every radiotap entry that is present
	 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
	 * entries present, or -EINVAL on error)
	 */

	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&iterator);

		if (ret)
			continue;

		/* see if this argument is something we can use */
		switch (iterator.this_arg_index) {
		/*
		 * You must take care when dereferencing iterator.this_arg
		 * for multibyte types... the pointer is not aligned. Use
		 * get_unaligned((type *)iterator.this_arg) to dereference
		 * iterator.this_arg for type "type" safely on all arches.
		 */
		case IEEE80211_RADIOTAP_FLAGS:
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
				/*
				 * this indicates that the skb we have been
				 * handed has the 32-bit FCS CRC at the end...
				 * we should react to that by snipping it off
				 * because it will be recomputed and added
				 * on transmission
				 */
				if (skb->len < (iterator._max_length + FCS_LEN))
					return false;

				skb_trim(skb, skb->len - FCS_LEN);
			}
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
				info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
				info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
			break;

		case IEEE80211_RADIOTAP_TX_FLAGS:
			txflags = get_unaligned_le16(iterator.this_arg);
			if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
				info->flags |= IEEE80211_TX_CTL_NO_ACK;
			if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO)
				info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
			break;

		case IEEE80211_RADIOTAP_RATE:
			rate = *iterator.this_arg;
			rate_flags = 0;
			rate_found = true;
			break;

		case IEEE80211_RADIOTAP_DATA_RETRIES:
			rate_retries = *iterator.this_arg;
			break;

		case IEEE80211_RADIOTAP_MCS:
			mcs_known = iterator.this_arg[0];
			mcs_flags = iterator.this_arg[1];
			if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
				break;

			rate_found = true;
			rate = iterator.this_arg[2];
			rate_flags = IEEE80211_TX_RC_MCS;

			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
			    mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
				rate_flags |= IEEE80211_TX_RC_SHORT_GI;

			mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
			    mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
				rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
			break;

		case IEEE80211_RADIOTAP_VHT:
			vht_known = get_unaligned_le16(iterator.this_arg);
			rate_found = true;

			rate_flags = IEEE80211_TX_RC_VHT_MCS;
			if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
			    (iterator.this_arg[2] &
			     IEEE80211_RADIOTAP_VHT_FLAG_SGI))
				rate_flags |= IEEE80211_TX_RC_SHORT_GI;
			if (vht_known &
			    IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
				if (iterator.this_arg[3] == 1)
					rate_flags |=
						IEEE80211_TX_RC_40_MHZ_WIDTH;
				else if (iterator.this_arg[3] == 4)
					rate_flags |=
						IEEE80211_TX_RC_80_MHZ_WIDTH;
				else if (iterator.this_arg[3] == 11)
					rate_flags |=
						IEEE80211_TX_RC_160_MHZ_WIDTH;
			}

			vht_mcs = iterator.this_arg[4] >> 4;
			vht_nss = iterator.this_arg[4] & 0xF;
			break;

		/*
		 * Please update the file
		 * Documentation/networking/mac80211-injection.rst
		 * when parsing new fields here.
		 */

		default:
			break;
		}
	}

	if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
		return false;

	if (rate_found) {
		info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;

		for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
			info->control.rates[i].idx = -1;
			info->control.rates[i].flags = 0;
			info->control.rates[i].count = 0;
		}

		if (rate_flags & IEEE80211_TX_RC_MCS) {
			info->control.rates[0].idx = rate;
		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
					       vht_nss);
		} else {
			for (i = 0; i < sband->n_bitrates; i++) {
				if (rate * 5 != sband->bitrates[i].bitrate)
					continue;

				info->control.rates[0].idx = i;
				break;
			}
		}

		if (info->control.rates[0].idx < 0)
			info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;

		info->control.rates[0].flags = rate_flags;
		info->control.rates[0].count = min_t(u8, rate_retries + 1,
						     local->hw.max_rate_tries);
	}

	return true;
}
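
/*
 * An illustrative sketch of what the parser above consumes: the fixed
 * 8-byte radiotap header an injector prepends to an 802.11 frame. With
 * an empty it_present bitmap no optional fields follow. This struct
 * mirrors struct ieee80211_radiotap_header; the name is hypothetical.
 */
struct example_radiotap_header {
	u8 it_version;		/* must be 0, see the check above */
	u8 it_pad;
	__le16 it_len;		/* total radiotap header length */
	__le32 it_present;	/* bitmap of fields that follow */
} __packed;
/* e.g. { 0, 0, cpu_to_le16(8), 0 } injects with no optional fields */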

netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ieee80211_sub_if_data *tmp_sdata, *sdata;
	struct cfg80211_chan_def *chandef;
	u16 len_rthdr;
	int hdrlen;

	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
		      IEEE80211_TX_CTL_INJECTED;

	/* Sanity-check and process the injection radiotap header */
	if (!ieee80211_parse_tx_radiotap(skb, dev))
		goto fail;

	/* we now know there is a radiotap header with a length we can use */
	len_rthdr = ieee80211_get_radiotap_len(skb->data);

	/*
	 * fix up the pointers accounting for the radiotap
	 * header still being in there. We are being given
	 * a precooked IEEE80211 header so no need for
	 * normal processing
	 */
	skb_set_mac_header(skb, len_rthdr);
	/*
	 * these are just fixed to the end of the rt area since we
	 * don't have any better information and at this point, nobody cares
	 */
	skb_set_network_header(skb, len_rthdr);
	skb_set_transport_header(skb, len_rthdr);

	if (skb->len < len_rthdr + 2)
		goto fail;

	hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (skb->len < len_rthdr + hdrlen)
		goto fail;

	/*
	 * Initialize skb->protocol if the injected frame is a data frame
	 * carrying a rfc1042 header
	 */
	if (ieee80211_is_data(hdr->frame_control) &&
	    skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
		u8 *payload = (u8 *)hdr + hdrlen;
		if (ether_addr_equal(payload, rfc1042_header))
			skb->protocol = cpu_to_be16((payload[6] << 8) |
						    payload[7]);
	}

	/*
	 * Initialize skb->priority for QoS frames. This is put in the TID field
	 * of the frame before passing it to the driver.
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
	}

	rcu_read_lock();

	/*
	 * We process outgoing injected frames that have a local address
	 * we handle as though they are non-injected frames.
	 * This code here isn't entirely correct, the local MAC address
	 * isn't always enough to find the interface to use; for proper
	 * VLAN/WDS support we will need a different mechanism (which
	 * likely isn't going to be monitor interfaces).
	 *
	 * This is necessary, for example, for old hostapd versions that
	 * don't use nl80211-based management TX/RX.
	 */
	sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(tmp_sdata))
			continue;
		if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
		    tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
			continue;
		if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
			sdata = tmp_sdata;
			break;
		}
	}

	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (!chanctx_conf) {
		tmp_sdata = rcu_dereference(local->monitor_sdata);
		if (tmp_sdata)
			chanctx_conf =
				rcu_dereference(tmp_sdata->vif.chanctx_conf);
	}

	if (chanctx_conf)
		chandef = &chanctx_conf->def;
	else if (!local->use_chanctx)
		chandef = &local->_oper_chandef;
	else
		goto fail_rcu;

	/*
	 * Frame injection is not allowed if beaconing is not allowed
	 * or if we need radar detection. Beaconing is usually not allowed when
	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
	 * Passive scan is also used in world regulatory domains where
	 * your country is not known and as such it should be treated as
	 * NO TX unless the channel is explicitly allowed in which case
	 * your current regulatory domain would not have the passive scan
	 * flag.
	 *
	 * Since AP mode uses monitor interfaces to inject/TX management
	 * frames we can make AP mode the exception to this rule once it
	 * supports radar detection as its implementation can deal with
	 * radar detection by itself. We can do that later by adding a
	 * monitor flag to interfaces used for AP support.
	 */
	if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
				     sdata->vif.type))
		goto fail_rcu;

	info->band = chandef->chan->band;

	/* remove the injection radiotap header */
	skb_pull(skb, len_rthdr);

	ieee80211_xmit(sdata, NULL, skb);
	rcu_read_unlock();

	return NETDEV_TX_OK;

fail_rcu:
	rcu_read_unlock();
fail:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK; /* meaning, we dealt with the skb */
}

static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
{
	u16 ethertype = (skb->data[12] << 8) | skb->data[13];

	return ethertype == ETH_P_TDLS &&
	       skb->len > 14 &&
	       skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
}

int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
			    struct sk_buff *skb,
			    struct sta_info **sta_out)
{
	struct sta_info *sta;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		sta = rcu_dereference(sdata->u.vlan.sta);
		if (sta) {
			*sta_out = sta;
			return 0;
		} else if (sdata->wdev.use_4addr) {
			return -ENOLINK;
		}
		fallthrough;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_OCB:
	case NL80211_IFTYPE_ADHOC:
		if (is_multicast_ether_addr(skb->data)) {
			*sta_out = ERR_PTR(-ENOENT);
			return 0;
		}
		sta = sta_info_get_bss(sdata, skb->data);
		break;
	case NL80211_IFTYPE_WDS:
		sta = sta_info_get(sdata, sdata->u.wds.remote_addr);
		break;
#ifdef CONFIG_MAC80211_MESH
	case NL80211_IFTYPE_MESH_POINT:
		/* determined much later */
		*sta_out = NULL;
		return 0;
#endif
	case NL80211_IFTYPE_STATION:
		if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
			sta = sta_info_get(sdata, skb->data);
			if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
				if (test_sta_flag(sta,
						  WLAN_STA_TDLS_PEER_AUTH)) {
					*sta_out = sta;
					return 0;
				}

				/*
				 * TDLS link during setup - throw out frames to
				 * peer. Allow TDLS-setup frames to unauthorized
				 * peers for the special case of a link teardown
				 * after a TDLS sta is removed due to being
				 * unreachable.
				 */
				if (!ieee80211_is_tdls_setup(skb))
					return -EINVAL;
			}
		}

		sta = sta_info_get(sdata, sdata->u.mgd.bssid);
		if (!sta)
			return -ENOLINK;
		break;
	default:
		return -EINVAL;
	}

	*sta_out = sta ?: ERR_PTR(-ENOENT);
	return 0;
}

static u16 ieee80211_store_ack_skb(struct ieee80211_local *local,
				   struct sk_buff *skb,
				   u32 *info_flags,
				   u64 *cookie)
{
	struct sk_buff *ack_skb;
	u16 info_id = 0;

	if (skb->sk)
		ack_skb = skb_clone_sk(skb);
	else
		ack_skb = skb_clone(skb, GFP_ATOMIC);

	if (ack_skb) {
		unsigned long flags;
		int id;

		spin_lock_irqsave(&local->ack_status_lock, flags);
		id = idr_alloc(&local->ack_status_frames, ack_skb,
			       1, 0x2000, GFP_ATOMIC);
		spin_unlock_irqrestore(&local->ack_status_lock, flags);

		if (id >= 0) {
			info_id = id;
			*info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			if (cookie) {
				*cookie = ieee80211_mgmt_tx_cookie(local);
				IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
			}
		} else {
			kfree_skb(ack_skb);
		}
	}

	return info_id;
}
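
/*
 * A minimal sketch (hypothetical names) of the idr pattern above:
 * publish a pending object under a small integer id, so that a later
 * TX-status completion can look it up and release it.
 */
static u16 example_store_pending(struct idr *idr, spinlock_t *lock,
				 void *obj)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(lock, flags);
	/* ids 1..0x1fff; 0 stays reserved as "no id allocated" */
	id = idr_alloc(idr, obj, 1, 0x2000, GFP_ATOMIC);
	spin_unlock_irqrestore(lock, flags);

	return id >= 0 ? id : 0;
}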

/**
 * ieee80211_build_hdr - build 802.11 header in the given frame
 * @sdata: virtual interface to build the header for
 * @skb: the skb to build the header in
 * @info_flags: skb flags to set
 * @sta: the station the frame is destined to (may be %NULL or an ERR_PTR)
 * @ctrl_flags: info control flags to set
 * @cookie: cookie pointer to fill with a TX status cookie (or %NULL)
 *
 * This function takes the skb with 802.3 header and reformats the header to
 * the appropriate IEEE 802.11 header based on which interface the packet is
 * being transmitted on.
 *
 * Note that this function also takes care of the TX status request and
 * potential unsharing of the SKB - this needs to be interleaved with the
 * header building.
 *
 * The function requires the RCU read-side lock to be held.
 *
 * Returns: the (possibly reallocated) skb or an ERR_PTR() code
 */
static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
					   struct sk_buff *skb, u32 info_flags,
					   struct sta_info *sta, u32 ctrl_flags,
					   u64 *cookie)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info;
	int head_need;
	u16 ethertype, hdrlen,  meshhdrlen = 0;
	__le16 fc;
	struct ieee80211_hdr hdr;
	struct ieee80211s_hdr mesh_hdr __maybe_unused;
	struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
	const u8 *encaps_data;
	int encaps_len, skip_header_bytes;
	bool wme_sta = false, authorized = false;
	bool tdls_peer;
	bool multicast;
	u16 info_id = 0;
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct ieee80211_sub_if_data *ap_sdata;
	enum nl80211_band band;
	int ret;

	if (IS_ERR(sta))
		sta = NULL;

#ifdef CONFIG_MAC80211_DEBUGFS
	if (local->force_tx_status)
		info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
#endif

	/* convert Ethernet header to proper 802.11 header (based on
	 * operation mode) */
	ethertype = (skb->data[12] << 8) | skb->data[13];
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		if (sdata->wdev.use_4addr) {
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
			memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
			memcpy(hdr.addr3, skb->data, ETH_ALEN);
			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
			hdrlen = 30;
			authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
			wme_sta = sta->sta.wme;
		}
		ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
					u.ap);
		chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
		if (!chanctx_conf) {
			ret = -ENOTCONN;
			goto free;
		}
		band = chanctx_conf->def.chan->band;
		if (sdata->wdev.use_4addr)
			break;
		fallthrough;
	case NL80211_IFTYPE_AP:
		if (sdata->vif.type == NL80211_IFTYPE_AP)
			chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (!chanctx_conf) {
			ret = -ENOTCONN;
			goto free;
		}
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		memcpy(hdr.addr1, skb->data, ETH_ALEN);
		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
		hdrlen = 24;
		band = chanctx_conf->def.chan->band;
		break;
	case NL80211_IFTYPE_WDS:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
		/* RA TA DA SA */
		memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr.addr3, skb->data, ETH_ALEN);
		memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
		hdrlen = 30;
		/*
		 * This is the exception! WDS style interfaces are prohibited
		 * when channel contexts are in use, so this must be valid
		 */
		band = local->hw.conf.chandef.chan->band;
		break;
#ifdef CONFIG_MAC80211_MESH
	case NL80211_IFTYPE_MESH_POINT:
		if (!is_multicast_ether_addr(skb->data)) {
			struct sta_info *next_hop;
			bool mpp_lookup = true;

			mpath = mesh_path_lookup(sdata, skb->data);
			if (mpath) {
				mpp_lookup = false;
				next_hop = rcu_dereference(mpath->next_hop);
				if (!next_hop ||
				    !(mpath->flags & (MESH_PATH_ACTIVE |
						      MESH_PATH_RESOLVING)))
					mpp_lookup = true;
			}

			if (mpp_lookup) {
				mppath = mpp_path_lookup(sdata, skb->data);
				if (mppath)
					mppath->exp_time = jiffies;
			}

			if (mppath && mpath)
				mesh_path_del(sdata, mpath->dst);
		}

		/*
		 * Use address extension if it is a packet from
		 * another interface or if we know the destination
		 * is being proxied by a portal (i.e. portal address
		 * differs from proxied address)
		 */
		if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
		    !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
					skb->data, skb->data + ETH_ALEN);
			meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr,
							       NULL, NULL);
		} else {
			/* DS -> MBSS (802.11-2012 13.11.3.3).
			 * For unicast with unknown forwarding information,
			 * destination might be in the MBSS or if that fails
			 * forwarded to another mesh gate. In either case
			 * resolution will be handled in ieee80211_xmit(), so
			 * leave the original DA. This also works for mcast */
			const u8 *mesh_da = skb->data;

			if (mppath)
				mesh_da = mppath->mpp;
			else if (mpath)
				mesh_da = mpath->dst;

			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
					mesh_da, sdata->vif.addr);
			if (is_multicast_ether_addr(mesh_da))
				/* DA TA mSA AE:SA */
				meshhdrlen = ieee80211_new_mesh_header(
						sdata, &mesh_hdr,
						skb->data + ETH_ALEN, NULL);
			else
				/* RA TA mDA mSA AE:DA SA */
				meshhdrlen = ieee80211_new_mesh_header(
						sdata, &mesh_hdr, skb->data,
						skb->data + ETH_ALEN);
		}
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (!chanctx_conf) {
			ret = -ENOTCONN;
			goto free;
		}
		band = chanctx_conf->def.chan->band;

		/* For injected frames, fill RA right away as nexthop lookup
		 * will be skipped.
		 */
		if ((ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) &&
		    is_zero_ether_addr(hdr.addr1))
			memcpy(hdr.addr1, skb->data, ETH_ALEN);
		break;
#endif
	case NL80211_IFTYPE_STATION:
		/* we already did checks when looking up the RA STA */
		tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);

		if (tdls_peer) {
			/* DA SA BSSID */
			memcpy(hdr.addr1, skb->data, ETH_ALEN);
			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
			memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
			hdrlen = 24;
		} else if (sdata->u.mgd.use_4addr &&
			   cpu_to_be16(ethertype) != sdata->control_port_protocol) {
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
					  IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
			memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
			memcpy(hdr.addr3, skb->data, ETH_ALEN);
			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
			hdrlen = 30;
		} else {
			fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
			/* BSSID SA DA */
			memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
			memcpy(hdr.addr3, skb->data, ETH_ALEN);
			hdrlen = 24;
		}
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (!chanctx_conf) {
			ret = -ENOTCONN;
			goto free;
		}
		band = chanctx_conf->def.chan->band;
		break;
	case NL80211_IFTYPE_OCB:
		/* DA SA BSSID */
		memcpy(hdr.addr1, skb->data, ETH_ALEN);
		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
		eth_broadcast_addr(hdr.addr3);
		hdrlen = 24;
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (!chanctx_conf) {
			ret = -ENOTCONN;
			goto free;
		}
		band = chanctx_conf->def.chan->band;
		break;
	case NL80211_IFTYPE_ADHOC:
		/* DA SA BSSID */
		memcpy(hdr.addr1, skb->data, ETH_ALEN);
		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
		memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
		hdrlen = 24;
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (!chanctx_conf) {
			ret = -ENOTCONN;
			goto free;
		}
		band = chanctx_conf->def.chan->band;
		break;
	default:
		ret = -EINVAL;
		goto free;
	}

	multicast = is_multicast_ether_addr(hdr.addr1);

	/* sta is always NULL for mesh */
	if (sta) {
		authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
		wme_sta = sta->sta.wme;
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		/* For mesh, the use of the QoS header is mandatory */
		wme_sta = true;
	}

	/* the receiver does QoS (which also means we do), so use it */
	if (wme_sta) {
		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		hdrlen += 2;
	}

	/*
	 * Drop unicast frames to unauthorised stations unless they are
	 * EAPOL frames from the local station.
	 */
	if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
		     (sdata->vif.type != NL80211_IFTYPE_OCB) &&
		     !multicast && !authorized &&
		     (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
		      !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
				    sdata->name, hdr.addr1);
#endif

		I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);

		ret = -EPERM;
		goto free;
	}

	if (unlikely(!multicast && ((skb->sk &&
		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
		     ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS)))
		info_id = ieee80211_store_ack_skb(local, skb, &info_flags,
						  cookie);

	/*
	 * If the skb is shared we need to obtain our own copy.
	 */
	if (skb_shared(skb)) {
		struct sk_buff *tmp_skb = skb;

		/* can't happen -- skb is a clone if info_id != 0 */
		WARN_ON(info_id);

		skb = skb_clone(skb, GFP_ATOMIC);
		kfree_skb(tmp_skb);

		if (!skb) {
			ret = -ENOMEM;
			goto free;
		}
	}

	hdr.frame_control = fc;
	hdr.duration_id = 0;
	hdr.seq_ctrl = 0;

	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= ETH_P_802_3_MIN) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	} else {
		encaps_data = NULL;
		encaps_len = 0;
	}

	skb_pull(skb, skip_header_bytes);
	head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);

	/*
	 * So we need to modify the skb header and hence need a copy of
	 * that. The head_need variable above doesn't, so far, include
	 * the needed header space that we don't need right away. If we
	 * can, then we don't reallocate right now but only after the
	 * frame arrives at the master device (if it does...)
	 *
	 * If we cannot, however, then we will reallocate to include all
	 * the ever needed space. Also, if we need to reallocate it anyway,
	 * make it big enough for everything we may ever need.
	 */

	if (head_need > 0 || skb_cloned(skb)) {
		head_need += sdata->encrypt_headroom;
		head_need += local->tx_headroom;
		head_need = max_t(int, 0, head_need);
		if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
			ieee80211_free_txskb(&local->hw, skb);
			skb = NULL;
			return ERR_PTR(-ENOMEM);
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);

#ifdef CONFIG_MAC80211_MESH
	if (meshhdrlen > 0)
		memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
#endif

	if (ieee80211_is_data_qos(fc)) {
		__le16 *qos_control;
		qos_control = skb_push(skb, 2);
		memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
		/*
		 * Maybe we could actually set some fields here, for now just
		 * initialise to zero to indicate no special operation.
		 */
		*qos_control = 0;
	} else
		memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);

	skb_reset_mac_header(skb);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));

	info->flags = info_flags;
	info->ack_frame_id = info_id;
	info->band = band;
	info->control.flags = ctrl_flags;

	return skb;
 free:
	kfree_skb(skb);
	return ERR_PTR(ret);
}
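
/*
 * An illustrative sketch of the simplest mapping performed above, the
 * managed-mode (ToDS) data frame: addr1 is the BSSID (receiver), addr2
 * the 802.3 source, addr3 the 802.3 destination. The helper name is
 * hypothetical.
 */
static void example_sta_data_addrs(struct ieee80211_hdr *hdr,
				   const u8 *eth, const u8 *bssid)
{
	memcpy(hdr->addr1, bssid, ETH_ALEN);		/* BSSID */
	memcpy(hdr->addr2, eth + ETH_ALEN, ETH_ALEN);	/* SA */
	memcpy(hdr->addr3, eth, ETH_ALEN);		/* DA */
}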

/*
 * fast-xmit overview
 *
 * The core idea of this fast-xmit is to remove per-packet checks by checking
 * them out of band. ieee80211_check_fast_xmit() implements the out-of-band
 * checks that are needed to get the sta->fast_tx pointer assigned, after which
 * much less work can be done per packet. For example, fragmentation must be
 * disabled or the fast_tx pointer will not be set. All the conditions are seen
 * in the code here.
 *
 * Once assigned, the fast_tx data structure also caches the per-packet 802.11
 * header and other data to aid packet processing in ieee80211_xmit_fast().
 *
 * The most difficult part of this is that when any of these assumptions
 * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
 * ieee80211_check_fast_xmit() or friends) is required to reset the data,
 * since the per-packet code no longer checks the conditions. This is reflected
 * by the calls to these functions throughout the rest of the code, and must be
 * maintained if any of the TX path checks change.
 */
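
/*
 * A condensed sketch of the caching idea described above, with
 * hypothetical field names: precompute the invariant parts of the
 * 802.11 header once, then per packet only copy the template and patch
 * the DA/SA at the recorded offsets.
 */
struct example_fast_tx_cache {
	u8 hdr[30 + 2 + 8];	/* header + QoS ctl + crypto IV template */
	u8 hdr_len;		/* bytes of hdr[] actually used */
	u8 da_offs, sa_offs;	/* where DA/SA live inside hdr[] */
};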
|
|
|
|
|
|
|
|
void ieee80211_check_fast_xmit(struct sta_info *sta)
{
	struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_hdr *hdr = (void *)build.hdr;
	struct ieee80211_chanctx_conf *chanctx_conf;
	__le16 fc;

	if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT))
		return;

	/* Locking here protects both the pointer itself, and against concurrent
	 * invocations winning data access races to, e.g., the key pointer that
	 * is used.
	 * Without it, the invocation of this function right after the key
	 * pointer changes wouldn't be sufficient, as another CPU could access
	 * the pointer, then stall, and then do the cache update after the CPU
	 * that invalidated the key.
	 * With the locking, such scenarios cannot happen as the check for the
	 * key and the fast-tx assignment are done atomically, so the CPU that
	 * modifies the key will either wait or the other CPU will see the key
	 * cleared/changed already.
	 */
	spin_lock_bh(&sta->lock);
	if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
	    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
	    sdata->vif.type == NL80211_IFTYPE_STATION)
		goto out;

	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		goto out;

	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
	    test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
	    test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
		goto out;

	if (sdata->noack_map)
		goto out;

	/* fast-xmit doesn't handle fragmentation at all */
	if (local->hw.wiphy->frag_threshold != (u32)-1 &&
	    !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG))
		goto out;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (!chanctx_conf) {
		rcu_read_unlock();
		goto out;
	}
	build.band = chanctx_conf->def.chan->band;
	rcu_read_unlock();

	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_ADHOC:
		/* DA SA BSSID */
		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
		memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
		build.hdr_len = 24;
		break;
	case NL80211_IFTYPE_STATION:
		if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
			/* DA SA BSSID */
			build.da_offs = offsetof(struct ieee80211_hdr, addr1);
			build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
			memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
			build.hdr_len = 24;
			break;
		}

		if (sdata->u.mgd.use_4addr) {
			/* non-regular ethertype cannot use the fastpath */
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
					  IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
			build.hdr_len = 30;
			break;
		}
		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
		/* BSSID SA DA */
		memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
		build.da_offs = offsetof(struct ieee80211_hdr, addr3);
		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
		build.hdr_len = 24;
		break;
	case NL80211_IFTYPE_AP_VLAN:
		if (sdata->wdev.use_4addr) {
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
					  IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
			build.hdr_len = 30;
			break;
		}
		fallthrough;
	case NL80211_IFTYPE_AP:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
		build.hdr_len = 24;
		break;
	default:
		/* not handled on fast-xmit */
		goto out;
	}

	if (sta->sta.wme) {
		build.hdr_len += 2;
		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
	}

	/* We store the key here so there's no point in using rcu_dereference()
	 * but that's fine because the code that changes the pointers will call
	 * this function after doing so. For a single CPU that would be enough,
	 * for multiple see the comment above.
	 */
	build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!build.key)
		build.key = rcu_access_pointer(sdata->default_unicast_key);
	if (build.key) {
		bool gen_iv, iv_spc, mmic;

		gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		mmic = build.key->conf.flags &
			(IEEE80211_KEY_FLAG_GENERATE_MMIC |
			 IEEE80211_KEY_FLAG_PUT_MIC_SPACE);

		/* don't handle software crypto */
		if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
			goto out;

		/* Key is being removed */
		if (build.key->flags & KEY_FLAG_TAINTED)
			goto out;
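
		/* Remember where the IV/PN would start in the cached header:
		 * for ciphers whose IV mac80211 generates, build.pn_offs lets
		 * ieee80211_xmit_fast_finish() write the per-packet PN
		 * directly at that offset.
		 */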
		switch (build.key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.pn_offs = build.hdr_len;
			if (gen_iv || iv_spc)
				build.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.pn_offs = build.hdr_len;
			if (gen_iv || iv_spc)
				build.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			/* cannot handle MMIC or IV generation in xmit-fast */
			if (mmic || gen_iv)
				goto out;
			if (iv_spc)
				build.hdr_len += IEEE80211_TKIP_IV_LEN;
			break;
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			/* cannot handle IV generation in fast-xmit */
			if (gen_iv)
				goto out;
			if (iv_spc)
				build.hdr_len += IEEE80211_WEP_IV_LEN;
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			WARN(1,
			     "management cipher suite 0x%x enabled for data\n",
			     build.key->conf.cipher);
			goto out;
		default:
			/* we don't know how to generate IVs for this at all */
			if (WARN_ON(gen_iv))
				goto out;
			/* pure hardware keys are OK, of course */
			if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
				break;
			/* cipher scheme might require space allocation */
			if (iv_spc &&
			    build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
				goto out;
			if (iv_spc)
				build.hdr_len += build.key->conf.iv_len;
		}

		fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}

	hdr->frame_control = fc;
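
	/* Cache the LLC/SNAP prefix right behind the 802.11 header; only the
	 * frame's original 2-byte ethertype then has to stay in place to
	 * complete the 8-byte SNAP header on the fast path.
	 */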
	memcpy(build.hdr + build.hdr_len,
	       rfc1042_header, sizeof(rfc1042_header));
	build.hdr_len += sizeof(rfc1042_header);

	fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	/* if the kmemdup fails, continue w/o fast_tx */
	if (!fast_tx)
		goto out;

 out:
	/* we might have raced against another call to this function */
	old = rcu_dereference_protected(sta->fast_tx,
					lockdep_is_held(&sta->lock));
	rcu_assign_pointer(sta->fast_tx, fast_tx);
	if (old)
		kfree_rcu(old, rcu_head);
	spin_unlock_bh(&sta->lock);
}

void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
{
	struct sta_info *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list)
		ieee80211_check_fast_xmit(sta);
	rcu_read_unlock();
}

void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;

	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata &&
		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
			continue;
		ieee80211_check_fast_xmit(sta);
	}

	rcu_read_unlock();
}

void ieee80211_clear_fast_xmit(struct sta_info *sta)
{
	struct ieee80211_fast_tx *fast_tx;

	spin_lock_bh(&sta->lock);
	fast_tx = rcu_dereference_protected(sta->fast_tx,
					    lockdep_is_held(&sta->lock));
	RCU_INIT_POINTER(sta->fast_tx, NULL);
	spin_unlock_bh(&sta->lock);

	if (fast_tx)
		kfree_rcu(fast_tx, rcu_head);
}

static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
					struct sk_buff *skb, int headroom)
{
	if (skb_headroom(skb) < headroom) {
		I802_DEBUG_INC(local->tx_expand_skb_head);

		if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
			wiphy_debug(local->hw.wiphy,
				    "failed to reallocate TX buffer\n");
			return false;
		}
	}

	return true;
}

static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
					 struct ieee80211_fast_tx *fast_tx,
					 struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ethhdr *amsdu_hdr;
	int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header);
	int subframe_len = skb->len - hdr_len;
	void *data;
	u8 *qc, *h_80211_src, *h_80211_dst;
	const u8 *bssid;

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		return false;

	if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
		return true;

	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
		return false;
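
	/* Move the 802.11 header into the newly created headroom so that a
	 * 14-byte A-MSDU subframe header (an ethhdr) can be carved out
	 * between it and the payload.
	 */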
	data = skb_push(skb, sizeof(*amsdu_hdr));
	memmove(data, data + sizeof(*amsdu_hdr), hdr_len);
	hdr = data;
	amsdu_hdr = data + hdr_len;
	/* h_80211_src/dst is addr* field within hdr */
	h_80211_src = data + fast_tx->sa_offs;
	h_80211_dst = data + fast_tx->da_offs;

	amsdu_hdr->h_proto = cpu_to_be16(subframe_len);
	ether_addr_copy(amsdu_hdr->h_source, h_80211_src);
	ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst);

	/* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA
	 * fields need to be changed to BSSID for A-MSDU frames depending
	 * on FromDS/ToDS values.
	 */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		bssid = sdata->u.mgd.bssid;
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
		bssid = sdata->vif.addr;
		break;
	default:
		bssid = NULL;
	}

	if (bssid && ieee80211_has_fromds(hdr->frame_control))
		ether_addr_copy(h_80211_src, bssid);

	if (bssid && ieee80211_has_tods(hdr->frame_control))
		ether_addr_copy(h_80211_dst, bssid);

	qc = ieee80211_get_qos_ctl(hdr);
	*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	info->control.flags |= IEEE80211_TX_CTRL_AMSDU;

	return true;
}

static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
				      struct sta_info *sta,
				      struct ieee80211_fast_tx *fast_tx,
				      struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct fq *fq = &local->fq;
	struct fq_tin *tin;
	struct fq_flow *flow;
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;
	struct sk_buff **frag_tail, *head;
	int subframe_len = skb->len - ETH_ALEN;
	u8 max_subframes = sta->sta.max_amsdu_subframes;
	int max_frags = local->hw.max_tx_fragments;
	int max_amsdu_len = sta->sta.max_amsdu_len;
	int orig_truesize;
	u32 flow_idx;
	__be16 len;
	void *data;
	bool ret = false;
	unsigned int orig_len;
	int n = 2, nfrags, pad = 0;
	u16 hdrlen;

	if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
		return false;

	if (skb_is_gso(skb))
		return false;

	if (!txq)
		return false;

	txqi = to_txq_info(txq);
	if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags))
		return false;

	if (sta->sta.max_rc_amsdu_len)
		max_amsdu_len = min_t(int, max_amsdu_len,
				      sta->sta.max_rc_amsdu_len);

	if (sta->sta.max_tid_amsdu_len[tid])
		max_amsdu_len = min_t(int, max_amsdu_len,
				      sta->sta.max_tid_amsdu_len[tid]);

	flow_idx = fq_flow_idx(fq, skb);

	spin_lock_bh(&fq->lock);

	/* TODO: Ideally aggregation should be done on dequeue to remain
	 * responsive to environment changes.
	 */

	tin = &txqi->tin;
	flow = fq_flow_classify(fq, tin, flow_idx, skb,
				fq_flow_get_default_func);
	head = skb_peek_tail(&flow->queue);
	if (!head || skb_is_gso(head))
		goto out;

	orig_truesize = head->truesize;
	orig_len = head->len;

	if (skb->len + head->len > max_amsdu_len)
		goto out;

	nfrags = 1 + skb_shinfo(skb)->nr_frags;
	nfrags += 1 + skb_shinfo(head)->nr_frags;
	frag_tail = &skb_shinfo(head)->frag_list;
	while (*frag_tail) {
		nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags;
		frag_tail = &(*frag_tail)->next;
		n++;
	}

	if (max_subframes && n > max_subframes)
		goto out;

	if (max_frags && nfrags > max_frags)
		goto out;

	if (!drv_can_aggregate_in_amsdu(local, head, skb))
		goto out;

	if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
		goto out;

	/*
	 * Pad out the previous subframe to a multiple of 4 by adding the
	 * padding to the next one that is being added. Note that head->len
	 * is the length of the full A-MSDU, but that works since each time
	 * we add a new subframe we pad out the previous one to a multiple
	 * of 4 and thus it no longer matters in the next round.
	 */
	hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
	if ((head->len - hdrlen) & 3)
		pad = 4 - ((head->len - hdrlen) & 3);

	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
					 2 + pad))
		goto out_recalc;
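
	/* Build the next subframe header: make ETH_ALEN + 2 bytes of room,
	 * move DA/SA to the front, then write the 16-bit subframe length and
	 * the RFC 1042 header; the frame's original ethertype, still in
	 * place, completes the SNAP header. The padding computed above is
	 * prepended last.
	 */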
	ret = true;
	data = skb_push(skb, ETH_ALEN + 2);
	memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);

	data += 2 * ETH_ALEN;
	len = cpu_to_be16(subframe_len);
	memcpy(data, &len, 2);
	memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));

	memset(skb_push(skb, pad), 0, pad);

	head->len += skb->len;
	head->data_len += skb->len;
	*frag_tail = skb;

out_recalc:
	fq->memory_usage += head->truesize - orig_truesize;
	if (head->len != orig_len) {
		flow->backlog += head->len - orig_len;
		tin->backlog_bytes += head->len - orig_len;

		fq_recalc_backlog(fq, tin, flow);
	}
out:
	spin_unlock_bh(&fq->lock);

	return ret;
}

/*
 * Can be called while the sta lock is held. Anything that can cause packets to
 * be generated will cause deadlock!
 */
static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
				       struct sta_info *sta, u8 pn_offs,
				       struct ieee80211_key *key,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 tid = IEEE80211_NUM_TIDS;

	if (key)
		info->control.hw_key = &key->conf;

	ieee80211_tx_stats(skb->dev, skb->len);

	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
	} else {
		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
		hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
		sdata->sequence_number += 0x10;
	}

	if (skb_shinfo(skb)->gso_size)
		sta->tx_stats.msdu[tid] +=
			DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
	else
		sta->tx_stats.msdu[tid]++;

	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	/* statistics normally done by ieee80211_tx_h_stats (but that
	 * has to consider fragmentation, so is more complex)
	 */
	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;

	if (pn_offs) {
		u64 pn;
		u8 *crypto_hdr = skb->data + pn_offs;
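
		/* The CCMP/GCMP header stores the 48-bit PN in little-endian
		 * order split around the key ID octet: PN0/PN1 first, then a
		 * reserved byte, the Ext IV bit (0x20) together with the key
		 * index, and finally PN2..PN5.
		 */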
		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			pn = atomic64_inc_return(&key->conf.tx_pn);
			crypto_hdr[0] = pn;
			crypto_hdr[1] = pn >> 8;
			crypto_hdr[3] = 0x20 | (key->conf.keyidx << 6);
			crypto_hdr[4] = pn >> 16;
			crypto_hdr[5] = pn >> 24;
			crypto_hdr[6] = pn >> 32;
			crypto_hdr[7] = pn >> 40;
			break;
		}
	}
}

static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
				struct sta_info *sta,
				struct ieee80211_fast_tx *fast_tx,
				struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	u16 ethertype = (skb->data[12] << 8) | skb->data[13];
	int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
	int hw_headroom = sdata->local->hw.extra_tx_headroom;
	struct ethhdr eth;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result r;
	struct tid_ampdu_tx *tid_tx = NULL;
	u8 tid = IEEE80211_NUM_TIDS;

	/* control port protocol needs a lot of special handling */
	if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
		return false;

	/* only RFC 1042 SNAP */
	if (ethertype < ETH_P_802_3_MIN)
		return false;

	/* don't handle TX status request here either */
	if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
		return false;

	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
		if (tid_tx) {
			if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
				return false;
			if (tid_tx->timeout)
				tid_tx->last_tx = jiffies;
		}
	}

	/* after this point (skb is modified) we cannot return false */

	if (skb_shared(skb)) {
		struct sk_buff *tmp_skb = skb;

		skb = skb_clone(skb, GFP_ATOMIC);
		kfree_skb(tmp_skb);

		if (!skb)
			return true;
	}

	if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
	    ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
		return true;

	/* will not be crypto-handled beyond what we do here, so use false
	 * as the may-encrypt argument for the resize to not account for
	 * more room than we already have in 'extra_head'
	 */
	if (unlikely(ieee80211_skb_resize(sdata, skb,
					  max_t(int, extra_head + hw_headroom -
						     skb_headroom(skb), 0),
					  false))) {
		kfree_skb(skb);
		return true;
	}
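
	/* Save the Ethernet DA/SA, then overwrite the rest of the Ethernet
	 * header with the cached 802.11 header and LLC/SNAP prefix; the
	 * original 2-byte ethertype stays behind them and completes the
	 * SNAP header.
	 */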
	memcpy(&eth, skb->data, ETH_HLEN - 2);
	hdr = skb_push(skb, extra_head);
	memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
	memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
	memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));
	info->band = fast_tx->band;
	info->control.vif = &sdata->vif;
	info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
		      IEEE80211_TX_CTL_DONTFRAG |
		      (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
	info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;

#ifdef CONFIG_MAC80211_DEBUGFS
	if (local->force_tx_status)
		info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
#endif

	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		*ieee80211_get_qos_ctl(hdr) = tid;
	}

	__skb_queue_head_init(&tx.skbs);

	tx.flags = IEEE80211_TX_UNICAST;
	tx.local = local;
	tx.sdata = sdata;
	tx.sta = sta;
	tx.key = fast_tx->key;

	if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
		tx.skb = skb;
		r = ieee80211_tx_h_rate_ctrl(&tx);
		skb = tx.skb;
		tx.skb = NULL;

		if (r != TX_CONTINUE) {
			if (r != TX_QUEUED)
				kfree_skb(skb);
			return true;
		}
	}

	if (ieee80211_queue_skb(local, sdata, sta, skb))
		return true;

	ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
				   fast_tx->key, skb);

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	__skb_queue_tail(&tx.skbs, skb);
	ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false);
	return true;
}

struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
				     struct ieee80211_txq *txq)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = container_of(txq, struct txq_info, txq);
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result r;
	struct ieee80211_vif *vif = txq->vif;

	WARN_ON_ONCE(softirq_count() == 0);

	if (!ieee80211_txq_airtime_check(hw, txq))
		return NULL;
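
/*
 * Frames that fail a check after being dequeued below (unauthorised
 * destination, stale key, failed linearize, ...) are freed and we jump
 * back here to try the next queued frame.
 */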
begin:
	spin_lock_bh(&fq->lock);

	if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
	    test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
		goto out;

	if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
		goto out;
	}

	/* Make sure fragments stay together. */
	skb = __skb_dequeue(&txqi->frags);
	if (skb)
		goto out;

	skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
	if (!skb)
		goto out;

	spin_unlock_bh(&fq->lock);

	hdr = (struct ieee80211_hdr *)skb->data;
	info = IEEE80211_SKB_CB(skb);

	memset(&tx, 0, sizeof(tx));
	__skb_queue_head_init(&tx.skbs);
	tx.local = local;
	tx.skb = skb;
	tx.sdata = vif_to_sdata(info->control.vif);

	if (txq->sta && !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
		tx.sta = container_of(txq->sta, struct sta_info, sta);
		/*
		 * Drop unicast frames to unauthorised stations unless they are
		 * EAPOL frames from the local station.
		 */
		if (unlikely(ieee80211_is_data(hdr->frame_control) &&
			     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
			     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
			     !is_multicast_ether_addr(hdr->addr1) &&
			     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
			     (!(info->control.flags &
				IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
			      !ether_addr_equal(tx.sdata->vif.addr,
						hdr->addr2)))) {
			I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	}

	/*
	 * The key can be removed while the packet was queued, so we need to
	 * call this here to get the current key.
	 */
	r = ieee80211_tx_h_select_key(&tx);
	if (r != TX_CONTINUE) {
		ieee80211_free_txskb(&local->hw, skb);
		goto begin;
	}

	if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
		info->flags |= IEEE80211_TX_CTL_AMPDU;
	else
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
		goto encap_out;

	if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
		struct sta_info *sta = container_of(txq->sta, struct sta_info,
						    sta);
		u8 pn_offs = 0;

		if (tx.key &&
		    (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
			pn_offs = ieee80211_hdrlen(hdr->frame_control);

		ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
					   tx.key, skb);
	} else {
		if (invoke_tx_handlers_late(&tx))
			goto begin;

		skb = __skb_dequeue(&tx.skbs);

		if (!skb_queue_empty(&tx.skbs)) {
			spin_lock_bh(&fq->lock);
			skb_queue_splice_tail(&tx.skbs, &txqi->frags);
			spin_unlock_bh(&fq->lock);
		}
	}

	if (skb_has_frag_list(skb) &&
	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
		if (skb_linearize(skb)) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	}

	switch (tx.sdata->vif.type) {
	case NL80211_IFTYPE_MONITOR:
		if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
			vif = &tx.sdata->vif;
			break;
		}
		tx.sdata = rcu_dereference(local->monitor_sdata);
		if (tx.sdata) {
			vif = &tx.sdata->vif;
			info->hw_queue =
				vif->hw_queue[skb_get_queue_mapping(skb)];
		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		} else {
			vif = NULL;
		}
		break;
	case NL80211_IFTYPE_AP_VLAN:
		tx.sdata = container_of(tx.sdata->bss,
					struct ieee80211_sub_if_data, u.ap);
		fallthrough;
	default:
		vif = &tx.sdata->vif;
		break;
	}

encap_out:
	IEEE80211_SKB_CB(skb)->control.vif = vif;
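
	/* Under AQL, charge the estimated airtime for this frame to the
	 * station now; it is credited back from the TX status path and in
	 * the meantime gates further dequeues via
	 * ieee80211_txq_airtime_check().
	 */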
	if (vif &&
	    wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
		bool ampdu = txq->ac != IEEE80211_AC_VO;
		u32 airtime;

		airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
							     skb->len, ampdu);
		if (airtime) {
			airtime = ieee80211_info_set_tx_time_est(info, airtime);
			ieee80211_sta_update_pending_airtime(local, tx.sta,
							     txq->ac,
							     airtime,
							     false);
		}
	}

	return skb;

out:
	spin_unlock_bh(&fq->lock);

	return skb;
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);

struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_txq *ret = NULL;
	struct txq_info *txqi = NULL, *head = NULL;
	bool found_eligible_txq = false;

	spin_lock_bh(&local->active_txq_lock[ac]);

begin:
	txqi = list_first_entry_or_null(&local->active_txqs[ac],
					struct txq_info,
					schedule_order);
	if (!txqi)
		goto out;
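
	/* Terminate after one full pass over the list in which nothing was
	 * AQL-eligible; if something was eligible but merely out of deficit,
	 * clear the flag and keep going so its deficit can be replenished.
	 */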
	if (txqi == head) {
		if (!found_eligible_txq)
			goto out;
		else
			found_eligible_txq = false;
	}

	if (!head)
		head = txqi;

	if (txqi->txq.sta) {
		struct sta_info *sta = container_of(txqi->txq.sta,
						    struct sta_info, sta);
		bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
		s64 deficit = sta->airtime[txqi->txq.ac].deficit;

		if (aql_check)
			found_eligible_txq = true;

		if (deficit < 0)
			sta->airtime[txqi->txq.ac].deficit +=
				sta->airtime_weight;
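
		/* Deficit round-robin: a txq whose deficit is exhausted (it
		 * was topped up by one weight above) or that is blocked by
		 * AQL gets rotated to the tail, and the next candidate is
		 * examined.
		 */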
		if (deficit < 0 || !aql_check) {
			list_move_tail(&txqi->schedule_order,
				       &local->active_txqs[txqi->txq.ac]);
			goto begin;
		}
	}

	if (txqi->schedule_round == local->schedule_round[ac])
		goto out;

	list_del_init(&txqi->schedule_order);
	txqi->schedule_round = local->schedule_round[ac];
	ret = &txqi->txq;

out:
	spin_unlock_bh(&local->active_txq_lock[ac]);
	return ret;
}
EXPORT_SYMBOL(ieee80211_next_txq);

void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq,
			      bool force)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);

	spin_lock_bh(&local->active_txq_lock[txq->ac]);

	if (list_empty(&txqi->schedule_order) &&
	    (force || !skb_queue_empty(&txqi->frags) ||
	     txqi->tin.backlog_packets)) {
		/* If airtime accounting is active, always enqueue STAs at the
		 * head of the list to ensure that they only get moved to the
		 * back by the airtime DRR scheduler once they have a negative
		 * deficit. A station that already has a negative deficit will
		 * get immediately moved to the back of the list on the next
		 * call to ieee80211_next_txq().
		 */
		if (txqi->txq.sta &&
		    wiphy_ext_feature_isset(local->hw.wiphy,
					    NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
			list_add(&txqi->schedule_order,
				 &local->active_txqs[txq->ac]);
		else
			list_add_tail(&txqi->schedule_order,
				      &local->active_txqs[txq->ac]);
	}

	spin_unlock_bh(&local->active_txq_lock[txq->ac]);
}
EXPORT_SYMBOL(__ieee80211_schedule_txq);
bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
				 struct ieee80211_txq *txq)
{
	struct sta_info *sta;
	struct ieee80211_local *local = hw_to_local(hw);

	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
		return true;

	if (!txq->sta)
		return true;

	sta = container_of(txq->sta, struct sta_info, sta);
	if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
	    sta->airtime[txq->ac].aql_limit_low)
		return true;

	if (atomic_read(&local->aql_total_pending_airtime) <
	    local->aql_threshold &&
	    atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
	    sta->airtime[txq->ac].aql_limit_high)
		return true;

	return false;
}
EXPORT_SYMBOL(ieee80211_txq_airtime_check);
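
/*
 * Example (driver-side sketch, not part of this file): a driver that
 * supports AQL gates its dequeue loop on ieee80211_txq_airtime_check();
 * mac80211 accounts the estimated airtime of each dequeued frame
 * against the per-station and per-device limits checked above.
 * drv_push_frame() is a hypothetical placeholder.
 *
 *	struct sk_buff *skb;
 *
 *	while (ieee80211_txq_airtime_check(hw, txq)) {
 *		skb = ieee80211_tx_dequeue(hw, txq);
 *		if (!skb)
 *			break;
 *		drv_push_frame(hw, txq, skb);
 *	}
 *
 * Completed airtime can additionally be reported with
 * ieee80211_sta_register_airtime() to drive the fairness scheduler.
 */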

bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
	struct sta_info *sta;
	u8 ac = txq->ac;

	spin_lock_bh(&local->active_txq_lock[ac]);

	if (!txqi->txq.sta)
		goto out;

	if (list_empty(&txqi->schedule_order))
		goto out;

	list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
				 schedule_order) {
		if (iter == txqi)
			break;

		if (!iter->txq.sta) {
			list_move_tail(&iter->schedule_order,
				       &local->active_txqs[ac]);
			continue;
		}
		sta = container_of(iter->txq.sta, struct sta_info, sta);
		if (sta->airtime[ac].deficit < 0)
			sta->airtime[ac].deficit += sta->airtime_weight;
		list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
	}

	sta = container_of(txqi->txq.sta, struct sta_info, sta);
	if (sta->airtime[ac].deficit >= 0)
		goto out;

	sta->airtime[ac].deficit += sta->airtime_weight;
	list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
	spin_unlock_bh(&local->active_txq_lock[ac]);

	return false;

out:
	if (!list_empty(&txqi->schedule_order))
		list_del_init(&txqi->schedule_order);
	spin_unlock_bh(&local->active_txq_lock[ac]);

	return true;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
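
/*
 * Example (driver-side sketch, not part of this file): drivers with
 * their own internal scheduler, instead of pulling queues via
 * ieee80211_next_txq(), ask whether a particular txq may be serviced
 * right now; the loop above rotates the schedule and refills deficits
 * on their behalf.
 *
 *	ieee80211_txq_schedule_start(hw, txq->ac);
 *	if (ieee80211_txq_may_transmit(hw, txq)) {
 *		(dequeue and transmit from this txq)
 *		ieee80211_return_txq(hw, txq, false);
 *	}
 *	ieee80211_txq_schedule_end(hw, txq->ac);
 */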

void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_local *local = hw_to_local(hw);

	spin_lock_bh(&local->active_txq_lock[ac]);
	local->schedule_round[ac]++;
	spin_unlock_bh(&local->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);

void __ieee80211_subif_start_xmit(struct sk_buff *skb,
				  struct net_device *dev,
				  u32 info_flags,
				  u32 ctrl_flags,
				  u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct sk_buff *next;

	if (unlikely(skb->len < ETH_HLEN)) {
		kfree_skb(skb);
		return;
	}

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
		goto out_free;

	if (IS_ERR(sta))
		sta = NULL;

	if (local->ops->wake_tx_queue) {
		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
		skb_set_queue_mapping(skb, queue);
		skb_get_hash(skb);
	}

	if (sta) {
		struct ieee80211_fast_tx *fast_tx;

		sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);

		fast_tx = rcu_dereference(sta->fast_tx);

		if (fast_tx &&
		    ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
			goto out;
	}

	if (skb_is_gso(skb)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, 0);
		if (IS_ERR(segs)) {
			goto out_free;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		/* we cannot process non-linear frames on this path */
		if (skb_linearize(skb)) {
			kfree_skb(skb);
			goto out;
		}

		/* the frame could be fragmented, software-encrypted, and other
		 * things so we cannot really handle checksum offload with it -
		 * fix it up in software before we handle anything else.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_set_transport_header(skb,
						 skb_checksum_start_offset(skb));
			if (skb_checksum_help(skb))
				goto out_free;
		}
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		if (skb->protocol == sdata->control_port_protocol)
			ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;

		skb = ieee80211_build_hdr(sdata, skb, info_flags,
					  sta, ctrl_flags, cookie);
		if (IS_ERR(skb)) {
			kfree_skb_list(next);
			goto out;
		}

		ieee80211_tx_stats(dev, skb->len);

		ieee80211_xmit(sdata, sta, skb);
	}
	goto out;
out_free:
	kfree_skb(skb);
out:
	rcu_read_unlock();
}
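
/*
 * Note on sk_pacing_shift_update() above: TCP small queues keeps the
 * per-socket data queued below the stack to roughly
 * sk_pacing_rate >> sk_pacing_shift.  As a worked example (assuming a
 * tx_sk_pacing_shift of 8, which mac80211 appears to use by default):
 * at 100 Mbit/s, (100e6 / 8) >> 8 is about 49 kB, i.e. roughly 4 ms of
 * traffic, enough to build full aggregates without bufferbloat.
 */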

static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
{
	struct ethhdr *eth;
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	eth = (void *)skb->data;
	ether_addr_copy(eth->h_dest, sta->sta.addr);

	return 0;
}

static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	const struct ethhdr *eth = (void *)skb->data;
	const struct vlan_ethhdr *ethvlan = (void *)skb->data;
	__be16 ethertype;

	if (likely(!is_multicast_ether_addr(eth->h_dest)))
		return false;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		if (sdata->u.vlan.sta)
			return false;
		if (sdata->wdev.use_4addr)
			return false;
		fallthrough;
	case NL80211_IFTYPE_AP:
		/* check runtime toggle for this bss */
		if (!sdata->bss->multicast_to_unicast)
			return false;
		break;
	default:
		return false;
	}

	/* multicast to unicast conversion only for some payload */
	ethertype = eth->h_proto;
	if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
		ethertype = ethvlan->h_vlan_encapsulated_proto;
	switch (ethertype) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		break;
	default:
		return false;
	}

	return true;
}

static void
ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
			     struct sk_buff_head *queue)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	const struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct sta_info *sta, *first = NULL;
	struct sk_buff *cloned_skb;

	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata)
			/* AP-VLAN mismatch */
			continue;
		if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
			/* do not send back to source */
			continue;
		if (!first) {
			first = sta;
			continue;
		}
		cloned_skb = skb_clone(skb, GFP_ATOMIC);
		if (!cloned_skb)
			goto multicast;
		if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
			dev_kfree_skb(cloned_skb);
			goto multicast;
		}
		__skb_queue_tail(queue, cloned_skb);
	}

	if (likely(first)) {
		if (unlikely(ieee80211_change_da(skb, first)))
			goto multicast;
		__skb_queue_tail(queue, skb);
	} else {
		/* no STA connected, drop */
		kfree_skb(skb);
		skb = NULL;
	}

	goto out;
multicast:
	__skb_queue_purge(queue);
	__skb_queue_tail(queue, skb);
out:
	rcu_read_unlock();
}
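
/*
 * The multicast_to_unicast flag checked above is a per-BSS runtime
 * toggle set from userspace via cfg80211 (hostapd exposes a
 * corresponding multicast_to_unicast configuration option).  When
 * enabled, ARP/IPv4/IPv6 multicast is cloned into one unicast frame per
 * associated station, trading extra copies for the much higher unicast
 * MCS rates and per-station retransmission.
 */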

/**
 * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
 * @skb: packet to be sent
 * @dev: incoming interface
 *
 * On failure skb will be freed.
 */
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
		struct sk_buff_head queue;

		__skb_queue_head_init(&queue);
		ieee80211_convert_to_unicast(skb, dev, &queue);
		while ((skb = __skb_dequeue(&queue)))
			__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
	} else {
		__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
	}

	return NETDEV_TX_OK;
}

static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *skb, int led_len,
			      struct sta_info *sta,
			      bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_control control = {};
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *pubsta = NULL;
	unsigned long flags;
	int q = info->hw_queue;

	if (ieee80211_queue_skb(local, sdata, sta, skb))
		return true;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);

	if (local->queue_stop_reasons[q] ||
	    (!txpending && !skb_queue_empty(&local->pending[q]))) {
		if (txpending)
			skb_queue_head(&local->pending[q], skb);
		else
			skb_queue_tail(&local->pending[q], skb);

		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		return false;
	}

	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	if (sta && sta->uploaded)
		pubsta = &sta->sta;

	control.sta = pubsta;

	drv_tx(local, &control, skb);

	return true;
}

static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
				struct net_device *dev, struct sta_info *sta,
				struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
	struct ieee80211_local *local = sdata->local;
	bool authorized = false;
	bool multicast;
	unsigned char *ra = ehdr->h_dest;

	if (IS_ERR(sta) || (sta && !sta->uploaded))
		sta = NULL;

	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
		ra = sdata->u.mgd.bssid;

	if (is_zero_ether_addr(ra))
		goto out_free;

	multicast = is_multicast_ether_addr(ra);

	if (sta)
		authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);

	if (!multicast && !authorized &&
	    (ehdr->h_proto != sdata->control_port_protocol ||
	     !ether_addr_equal(sdata->vif.addr, ehdr->h_source)))
		goto out_free;

	if (multicast && sdata->vif.type == NL80211_IFTYPE_AP &&
	    !atomic_read(&sdata->u.ap.num_mcast_sta))
		goto out_free;

	if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
	    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
		goto out_free;

	memset(info, 0, sizeof(*info));

	if (unlikely(!multicast && skb->sk &&
		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
		info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
							     &info->flags, NULL);

	if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) {
		if (sdata->control_port_no_encrypt)
			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
	}

	if (multicast)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;

	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	ieee80211_tx_stats(dev, skb->len);

	if (sta) {
		sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
		sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
	}

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	info->control.flags |= IEEE80211_TX_CTRL_HW_80211_ENCAP;
	info->control.vif = &sdata->vif;

	ieee80211_tx_8023(sdata, skb, skb->len, sta, false);

	return;

out_free:
	kfree_skb(skb);
}

netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sta_info *sta;

	if (WARN_ON(!sdata->hw_80211_encap)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < ETH_HLEN)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
		kfree_skb(skb);
	else
		ieee80211_8023_xmit(sdata, dev, sta, skb);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}
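
/*
 * Note: the 802.3 TX path above bypasses ieee80211_build_hdr()
 * entirely.  IEEE80211_TX_CTRL_HW_80211_ENCAP tells the driver that the
 * frame is still an Ethernet frame and that the hardware or firmware
 * will generate the 802.11 header (and typically apply encryption)
 * itself.
 */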

struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *skb, u32 info_flags)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_data tx = {
		.local = sdata->local,
		.sdata = sdata,
	};
	struct sta_info *sta;

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
		kfree_skb(skb);
		skb = ERR_PTR(-EINVAL);
		goto out;
	}

	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, 0, NULL);
	if (IS_ERR(skb))
		goto out;

	hdr = (void *)skb->data;
	tx.sta = sta_info_get(sdata, hdr->addr1);
	tx.skb = skb;

	if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
		rcu_read_unlock();
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

out:
	rcu_read_unlock();
	return skb;
}

/*
 * ieee80211_clear_tx_pending may not be called in a context where
 * it is possible that packets could come in again.
 */
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < local->hw.queues; i++) {
		while ((skb = skb_dequeue(&local->pending[i])) != NULL)
			ieee80211_free_txskb(&local->hw, skb);
	}
}

/*
 * Returns false if the frame couldn't be transmitted but was queued instead,
 * which in this case means re-queued -- take as an indication to stop sending
 * more pending frames.
 */
static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
				     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	struct ieee80211_hdr *hdr;
	bool result;
	struct ieee80211_chanctx_conf *chanctx_conf;

	sdata = vif_to_sdata(info->control.vif);

	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (unlikely(!chanctx_conf)) {
			dev_kfree_skb(skb);
			return true;
		}
		info->band = chanctx_conf->def.chan->band;
		result = ieee80211_tx(sdata, NULL, skb, true);
	} else if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
		if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
			dev_kfree_skb(skb);
			return true;
		}

		if (IS_ERR(sta) || (sta && !sta->uploaded))
			sta = NULL;

		result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true);
	} else {
		struct sk_buff_head skbs;

		__skb_queue_head_init(&skbs);
		__skb_queue_tail(&skbs, skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		sta = sta_info_get(sdata, hdr->addr1);

		result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
	}

	return result;
}

/*
 * Transmit all pending packets. Called from tasklet.
 */
void ieee80211_tx_pending(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *)data;
	unsigned long flags;
	int i;
	bool txok;

	rcu_read_lock();

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (i = 0; i < local->hw.queues; i++) {
		/*
		 * If the queue is stopped by something other than pending
		 * frames, or we have no pending frames, proceed to the next
		 * queue.
		 */
		if (local->queue_stop_reasons[i] ||
		    skb_queue_empty(&local->pending[i]))
			continue;

		while (!skb_queue_empty(&local->pending[i])) {
			struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

			if (WARN_ON(!info->control.vif)) {
				ieee80211_free_txskb(&local->hw, skb);
				continue;
			}

			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
					       flags);

			txok = ieee80211_tx_pending_skb(local, skb);

			spin_lock_irqsave(&local->queue_stop_reason_lock,
					  flags);
			if (!txok)
				break;
		}

		if (skb_queue_empty(&local->pending[i]))
			ieee80211_propagate_queue_wake(local, i);
	}
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	rcu_read_unlock();
}

/* functions for drivers to get certain frames */

static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
				       struct ps_data *ps, struct sk_buff *skb,
				       bool is_template)
{
	u8 *pos, *tim;
	int aid0 = 0;
	int i, have_bits = 0, n1, n2;

	/* Generate bitmap for TIM only if there are any STAs in power save
	 * mode. */
	if (atomic_read(&ps->num_sta_ps) > 0)
		/* in the hope that this is faster than
		 * checking byte-for-byte */
		have_bits = !bitmap_empty((unsigned long *)ps->tim,
					  IEEE80211_MAX_AID+1);
	if (!is_template) {
		if (ps->dtim_count == 0)
			ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
		else
			ps->dtim_count--;
	}
	tim = pos = skb_put(skb, 6);
	*pos++ = WLAN_EID_TIM;
	*pos++ = 4;
	*pos++ = ps->dtim_count;
	*pos++ = sdata->vif.bss_conf.dtim_period;

	if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
		aid0 = 1;

	ps->dtim_bc_mc = aid0 == 1;

	if (have_bits) {
		/* Find largest even number N1 so that bits numbered 1 through
		 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
		 * (N2 + 1) x 8 through 2007 are 0. */
		n1 = 0;
		for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
			if (ps->tim[i]) {
				n1 = i & 0xfe;
				break;
			}
		}
		n2 = n1;
		for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
			if (ps->tim[i]) {
				n2 = i;
				break;
			}
		}

		/* Bitmap control */
		*pos++ = n1 | aid0;
		/* Part Virt Bitmap */
		skb_put(skb, n2 - n1);
		memcpy(pos, ps->tim + n1, n2 - n1 + 1);

		tim[1] = n2 - n1 + 4;
	} else {
		*pos++ = aid0; /* Bitmap control */
		*pos++ = 0; /* Part Virt Bitmap */
	}
}
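
/*
 * Worked example for the encoding above (an illustration, not spec
 * text): suppose only AID 37 has buffered frames.  Bit 37 sits in octet
 * 37 / 8 = 4 of the virtual bitmap, so the scan finds n1 = 4 & 0xfe = 4
 * and n2 = 4.  Since n1 is even its low bit is free, so "n1 | aid0"
 * packs the Bitmap Offset (n1 / 2 = 2, stored in bits 1-7) together
 * with the multicast flag in bit 0, and the partial virtual bitmap
 * carries the single octet 4.  The TIM element length becomes
 * n2 - n1 + 4 = 4 octets.
 */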

static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
				    struct ps_data *ps, struct sk_buff *skb,
				    bool is_template)
{
	struct ieee80211_local *local = sdata->local;

	/*
	 * Not very nice, but we want to allow the driver to call
	 * ieee80211_beacon_get() as a response to the set_tim()
	 * callback. That, however, is already invoked under the
	 * sta_lock to guarantee consistent and race-free update
	 * of the tim bitmap in mac80211 and the driver.
	 */
	if (local->tim_in_locked_section) {
		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
	} else {
		spin_lock_bh(&local->tim_lock);
		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
		spin_unlock_bh(&local->tim_lock);
	}

	return 0;
}

static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
			      struct beacon_data *beacon)
{
	struct probe_resp *resp;
	u8 *beacon_data;
	size_t beacon_data_len;
	int i;
	u8 count = beacon->csa_current_counter;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		beacon_data = beacon->tail;
		beacon_data_len = beacon->tail_len;
		break;
	case NL80211_IFTYPE_ADHOC:
		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
		break;
	case NL80211_IFTYPE_MESH_POINT:
		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
		break;
	default:
		return;
	}

	rcu_read_lock();
	for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) {
		resp = rcu_dereference(sdata->u.ap.probe_resp);

		if (beacon->csa_counter_offsets[i]) {
			if (WARN_ON_ONCE(beacon->csa_counter_offsets[i] >=
					 beacon_data_len)) {
				rcu_read_unlock();
				return;
			}

			beacon_data[beacon->csa_counter_offsets[i]] = count;
		}

		if (sdata->vif.type == NL80211_IFTYPE_AP && resp)
			resp->data[resp->csa_counter_offsets[i]] = count;
	}
	rcu_read_unlock();
}

static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
{
	beacon->csa_current_counter--;

	/* the counter should never reach 0 */
	WARN_ON_ONCE(!beacon->csa_current_counter);

	return beacon->csa_current_counter;
}

u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;
	u8 count = 0;

	rcu_read_lock();

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		beacon = rcu_dereference(sdata->u.ap.beacon);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		beacon = rcu_dereference(sdata->u.ibss.presp);
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		beacon = rcu_dereference(sdata->u.mesh.beacon);

	if (!beacon)
		goto unlock;

	count = __ieee80211_csa_update_counter(beacon);

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL(ieee80211_csa_update_counter);

void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;

	rcu_read_lock();

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		beacon = rcu_dereference(sdata->u.ap.beacon);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		beacon = rcu_dereference(sdata->u.ibss.presp);
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		beacon = rcu_dereference(sdata->u.mesh.beacon);

	if (!beacon)
		goto unlock;

	if (counter < beacon->csa_current_counter)
		beacon->csa_current_counter = counter;

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_csa_set_counter);

bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;
	u8 *beacon_data;
	size_t beacon_data_len;
	int ret = false;

	if (!ieee80211_sdata_running(sdata))
		return false;

	rcu_read_lock();
	if (vif->type == NL80211_IFTYPE_AP) {
		struct ieee80211_if_ap *ap = &sdata->u.ap;

		beacon = rcu_dereference(ap->beacon);
		if (WARN_ON(!beacon || !beacon->tail))
			goto out;
		beacon_data = beacon->tail;
		beacon_data_len = beacon->tail_len;
	} else if (vif->type == NL80211_IFTYPE_ADHOC) {
		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;

		beacon = rcu_dereference(ifibss->presp);
		if (!beacon)
			goto out;

		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
	} else if (vif->type == NL80211_IFTYPE_MESH_POINT) {
		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

		beacon = rcu_dereference(ifmsh->beacon);
		if (!beacon)
			goto out;

		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
	} else {
		WARN_ON(1);
		goto out;
	}

	if (!beacon->csa_counter_offsets[0])
		goto out;

	if (WARN_ON_ONCE(beacon->csa_counter_offsets[0] > beacon_data_len))
		goto out;

	if (beacon_data[beacon->csa_counter_offsets[0]] == 1)
		ret = true;

out:
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(ieee80211_csa_is_complete);
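
/*
 * Example (driver-side sketch, not part of this file): a driver whose
 * firmware counts the CSA countdown in transmitted beacons typically
 * polls for completion and then lets mac80211 perform the switch:
 *
 *	if (ieee80211_csa_is_complete(vif))
 *		ieee80211_csa_finish(vif);
 *	else
 *		ieee80211_csa_update_counter(vif);
 *
 * ieee80211_csa_finish() schedules the actual channel-switch work in
 * mac80211.
 */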

static int ieee80211_beacon_protect(struct sk_buff *skb,
				    struct ieee80211_local *local,
				    struct ieee80211_sub_if_data *sdata)
{
	ieee80211_tx_result res;
	struct ieee80211_tx_data tx;
	struct sk_buff *check_skb;

	memset(&tx, 0, sizeof(tx));
	tx.key = rcu_dereference(sdata->default_beacon_key);
	if (!tx.key)
		return 0;
	tx.local = local;
	tx.sdata = sdata;
	__skb_queue_head_init(&tx.skbs);
	__skb_queue_tail(&tx.skbs, skb);
	res = ieee80211_tx_h_encrypt(&tx);
	check_skb = __skb_dequeue(&tx.skbs);
	/* we may crash after this, but it'd be a bug in crypto */
	WARN_ON(check_skb != skb);
	if (WARN_ON_ONCE(res != TX_CONTINUE))
		return -EINVAL;

	return 0;
}
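
/*
 * Beacon protection: the default_beacon_key consumed above is the
 * beacon-integrity (BIGTK) key installed via nl80211; wiphys advertise
 * support with NL80211_EXT_FEATURE_BEACON_PROTECTION.  With no such key
 * configured the beacon is simply sent unprotected, as before.
 */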

static struct sk_buff *
__ieee80211_beacon_get(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_mutable_offsets *offs,
		       bool is_template)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct beacon_data *beacon = NULL;
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_info *info;
	struct ieee80211_sub_if_data *sdata = NULL;
	enum nl80211_band band;
	struct ieee80211_tx_rate_control txrc;
	struct ieee80211_chanctx_conf *chanctx_conf;
	int csa_off_base = 0;

	rcu_read_lock();

	sdata = vif_to_sdata(vif);
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);

	if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
		goto out;

	if (offs)
		memset(offs, 0, sizeof(*offs));

	if (sdata->vif.type == NL80211_IFTYPE_AP) {
		struct ieee80211_if_ap *ap = &sdata->u.ap;

		beacon = rcu_dereference(ap->beacon);
		if (beacon) {
			if (beacon->csa_counter_offsets[0]) {
				if (!is_template)
					__ieee80211_csa_update_counter(beacon);

				ieee80211_set_csa(sdata, beacon);
			}

			/*
			 * headroom, head length,
			 * tail length and maximum TIM length
			 */
			skb = dev_alloc_skb(local->tx_headroom +
					    beacon->head_len +
					    beacon->tail_len + 256 +
					    local->hw.extra_beacon_tailroom);
			if (!skb)
				goto out;

			skb_reserve(skb, local->tx_headroom);
			skb_put_data(skb, beacon->head, beacon->head_len);

			ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
						 is_template);

			if (offs) {
				offs->tim_offset = beacon->head_len;
				offs->tim_length = skb->len - beacon->head_len;

				/* for AP the csa offsets are from tail */
				csa_off_base = skb->len;
			}

			if (beacon->tail)
				skb_put_data(skb, beacon->tail,
					     beacon->tail_len);

			if (ieee80211_beacon_protect(skb, local, sdata) < 0)
				goto out;
		} else
			goto out;
	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
		struct ieee80211_hdr *hdr;

		beacon = rcu_dereference(ifibss->presp);
		if (!beacon)
			goto out;

		if (beacon->csa_counter_offsets[0]) {
			if (!is_template)
				__ieee80211_csa_update_counter(beacon);

			ieee80211_set_csa(sdata, beacon);
		}

		skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
				    local->hw.extra_beacon_tailroom);
		if (!skb)
			goto out;
		skb_reserve(skb, local->tx_headroom);
		skb_put_data(skb, beacon->head, beacon->head_len);

		hdr = (struct ieee80211_hdr *) skb->data;
		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						 IEEE80211_STYPE_BEACON);
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

		beacon = rcu_dereference(ifmsh->beacon);
		if (!beacon)
			goto out;

		if (beacon->csa_counter_offsets[0]) {
			if (!is_template)
				/* TODO: For mesh csa_counter is in TU, so
				 * decrementing it by one isn't correct, but
				 * for now we leave it consistent with overall
				 * mac80211's behavior.
				 */
				__ieee80211_csa_update_counter(beacon);

			ieee80211_set_csa(sdata, beacon);
		}

		if (ifmsh->sync_ops)
			ifmsh->sync_ops->adjust_tsf(sdata, beacon);

		skb = dev_alloc_skb(local->tx_headroom +
				    beacon->head_len +
				    256 + /* TIM IE */
				    beacon->tail_len +
				    local->hw.extra_beacon_tailroom);
		if (!skb)
			goto out;
		skb_reserve(skb, local->tx_headroom);
		skb_put_data(skb, beacon->head, beacon->head_len);
		ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);

		if (offs) {
			offs->tim_offset = beacon->head_len;
			offs->tim_length = skb->len - beacon->head_len;
		}

		skb_put_data(skb, beacon->tail, beacon->tail_len);
	} else {
		WARN_ON(1);
		goto out;
	}

	/* CSA offsets */
	if (offs && beacon) {
		int i;

		for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) {
			u16 csa_off = beacon->csa_counter_offsets[i];

			if (!csa_off)
				continue;

			offs->csa_counter_offs[i] = csa_off_base + csa_off;
		}
	}

	band = chanctx_conf->def.chan->band;

	info = IEEE80211_SKB_CB(skb);

	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	info->flags |= IEEE80211_TX_CTL_NO_ACK;
	info->band = band;

	memset(&txrc, 0, sizeof(txrc));
	txrc.hw = hw;
	txrc.sband = local->hw.wiphy->bands[band];
	txrc.bss_conf = &sdata->vif.bss_conf;
	txrc.skb = skb;
	txrc.reported_rate.idx = -1;
	if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
		txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band];
	else
		txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
	txrc.bss = true;
	rate_control_get_rate(sdata, NULL, &txrc);

	info->control.vif = vif;

	info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
		       IEEE80211_TX_CTL_ASSIGN_SEQ |
		       IEEE80211_TX_CTL_FIRST_FRAGMENT;
out:
	rcu_read_unlock();
	return skb;
}

struct sk_buff *
ieee80211_beacon_get_template(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_mutable_offsets *offs)
{
	return __ieee80211_beacon_get(hw, vif, offs, true);
}
EXPORT_SYMBOL(ieee80211_beacon_get_template);

struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 u16 *tim_offset, u16 *tim_length)
{
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
	struct sk_buff *copy;
	struct ieee80211_supported_band *sband;
	int shift;

	if (!bcn)
		return bcn;

	if (tim_offset)
		*tim_offset = offs.tim_offset;

	if (tim_length)
		*tim_length = offs.tim_length;

	if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
	    !hw_to_local(hw)->monitors)
		return bcn;

	/* send a copy to monitor interfaces */
	copy = skb_copy(bcn, GFP_ATOMIC);
	if (!copy)
		return bcn;

	shift = ieee80211_vif_get_shift(vif);
	sband = ieee80211_get_sband(vif_to_sdata(vif));
	if (!sband)
		return bcn;

	ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false,
			     NULL);

	return bcn;
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
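
/*
 * Example (driver-side sketch, not part of this file): host-based
 * beaconing fetches a fresh frame each beacon interval;
 * ieee80211_beacon_get() in mac80211.h is a convenience wrapper around
 * ieee80211_beacon_get_tim() with NULL tim offsets.  drv_push_beacon()
 * is a hypothetical placeholder.
 *
 *	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
 *
 *	if (skb)
 *		drv_push_beacon(hw, skb);
 */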

struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif)
{
	struct ieee80211_if_ap *ap = NULL;
	struct sk_buff *skb = NULL;
	struct probe_resp *presp = NULL;
	struct ieee80211_hdr *hdr;
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);

	if (sdata->vif.type != NL80211_IFTYPE_AP)
		return NULL;

	rcu_read_lock();

	ap = &sdata->u.ap;
	presp = rcu_dereference(ap->probe_resp);
	if (!presp)
		goto out;

	skb = dev_alloc_skb(presp->len);
	if (!skb)
		goto out;

	skb_put_data(skb, presp->data, presp->len);

	hdr = (struct ieee80211_hdr *) skb->data;
	memset(hdr->addr1, 0, sizeof(hdr->addr1));

out:
	rcu_read_unlock();
	return skb;
}
EXPORT_SYMBOL(ieee80211_proberesp_get);
2010-01-06 01:16:19 +07:00
|
|
|
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
|
|
|
|
struct ieee80211_vif *vif)
|
|
|
|
{
|
|
|
|
struct ieee80211_sub_if_data *sdata;
|
|
|
|
struct ieee80211_if_managed *ifmgd;
|
|
|
|
struct ieee80211_pspoll *pspoll;
|
|
|
|
struct ieee80211_local *local;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
sdata = vif_to_sdata(vif);
|
|
|
|
ifmgd = &sdata->u.mgd;
|
|
|
|
local = sdata->local;
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
|
2011-08-30 04:17:31 +07:00
|
|
|
if (!skb)
|
2010-01-06 01:16:19 +07:00
|
|
|
return NULL;
|
2011-08-30 04:17:31 +07:00
|
|
|
|
2010-01-06 01:16:19 +07:00
|
|
|
skb_reserve(skb, local->hw.extra_tx_headroom);
|
|
|
|
|
networking: convert many more places to skb_put_zero()
There were many places that my previous spatch didn't find,
as pointed out by yuan linyu in various patches.
The following spatch found many more and also removes the
now unnecessary casts:
@@
identifier p, p2;
expression len;
expression skb;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_zero(skb, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_zero(skb, len);
)
... when != p
(
p2 = (t2)p;
-memset(p2, 0, len);
|
-memset(p, 0, len);
)
@@
type t, t2;
identifier p, p2;
expression skb;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_zero(skb, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_zero(skb, sizeof(t));
)
... when != p
(
p2 = (t2)p;
-memset(p2, 0, sizeof(*p));
|
-memset(p, 0, sizeof(*p));
)
@@
expression skb, len;
@@
-memset(skb_put(skb, len), 0, len);
+skb_put_zero(skb, len);
Apply it to the tree (with one manual fixup to keep the
comment in vxlan.c, which spatch removed.)
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 19:29:19 +07:00
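/*
 * Illustration of that rewrite for the allocation below (one of the
 * spatch patterns above; the exact original form may have differed):
 *
 *	-pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
 *	-memset(pspoll, 0, sizeof(*pspoll));
 *	+pspoll = skb_put_zero(skb, sizeof(*pspoll));
 */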
|
|
|
pspoll = skb_put_zero(skb, sizeof(*pspoll));
|
2010-01-06 01:16:19 +07:00
|
|
|
pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
|
|
|
|
IEEE80211_STYPE_PSPOLL);
|
2020-04-17 17:38:04 +07:00
|
|
|
pspoll->aid = cpu_to_le16(sdata->vif.bss_conf.aid);
|
2010-01-06 01:16:19 +07:00
|
|
|
|
|
|
|
/* aid in PS-Poll has its two MSBs each set to 1 */
|
|
|
|
pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
|
|
|
|
|
|
|
|
memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
|
|
|
|
memcpy(pspoll->ta, vif->addr, ETH_ALEN);
|
|
|
|
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_pspoll_get);
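/*
 * Usage sketch (illustration only): a driver waking from power save
 * can ask mac80211 to build a PS-Poll towards the current AP and then
 * queue it itself.  mydrv_tx_frame() is a hypothetical driver hook.
 *
 *	struct sk_buff *skb = ieee80211_pspoll_get(hw, vif);
 *
 *	if (skb)
 *		mydrv_tx_frame(hw, skb);
 */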
|
|
|
|
|
|
|
|
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
|
2017-11-21 20:46:08 +07:00
|
|
|
struct ieee80211_vif *vif,
|
|
|
|
bool qos_ok)
|
2010-01-06 01:16:19 +07:00
|
|
|
{
|
|
|
|
struct ieee80211_hdr_3addr *nullfunc;
|
|
|
|
struct ieee80211_sub_if_data *sdata;
|
|
|
|
struct ieee80211_if_managed *ifmgd;
|
|
|
|
struct ieee80211_local *local;
|
|
|
|
struct sk_buff *skb;
|
2017-11-21 20:46:08 +07:00
|
|
|
bool qos = false;
|
2010-01-06 01:16:19 +07:00
|
|
|
|
|
|
|
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
sdata = vif_to_sdata(vif);
|
|
|
|
ifmgd = &sdata->u.mgd;
|
|
|
|
local = sdata->local;
|
|
|
|
|
2017-11-21 20:46:08 +07:00
|
|
|
if (qos_ok) {
|
|
|
|
struct sta_info *sta;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
sta = sta_info_get(sdata, ifmgd->bssid);
|
|
|
|
qos = sta && sta->sta.wme;
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
|
|
|
|
sizeof(*nullfunc) + 2);
|
2011-08-30 04:17:31 +07:00
|
|
|
if (!skb)
|
2010-01-06 01:16:19 +07:00
|
|
|
return NULL;
|
2011-08-30 04:17:31 +07:00
|
|
|
|
2010-01-06 01:16:19 +07:00
|
|
|
skb_reserve(skb, local->hw.extra_tx_headroom);
|
|
|
|
|
2017-06-16 19:29:19 +07:00
|
|
|
nullfunc = skb_put_zero(skb, sizeof(*nullfunc));
|
2010-01-06 01:16:19 +07:00
|
|
|
nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
|
|
|
|
IEEE80211_STYPE_NULLFUNC |
|
|
|
|
IEEE80211_FCTL_TODS);
|
2017-11-21 20:46:08 +07:00
|
|
|
if (qos) {
|
2018-11-09 17:16:46 +07:00
|
|
|
__le16 qoshdr = cpu_to_le16(7); /* QoS control field: TID 7 (AC_VO) */
|
2017-11-21 20:46:08 +07:00
|
|
|
|
|
|
|
BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
|
|
|
|
IEEE80211_STYPE_NULLFUNC) !=
|
|
|
|
IEEE80211_STYPE_QOS_NULLFUNC);
|
|
|
|
nullfunc->frame_control |=
|
|
|
|
cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
|
|
|
|
skb->priority = 7;
|
|
|
|
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
|
2018-11-09 17:16:46 +07:00
|
|
|
skb_put_data(skb, &qoshdr, sizeof(qoshdr));
|
2017-11-21 20:46:08 +07:00
|
|
|
}
|
|
|
|
|
2010-01-06 01:16:19 +07:00
|
|
|
memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
|
|
|
|
memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
|
|
|
|
memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
|
|
|
|
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_nullfunc_get);
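/*
 * Usage sketch (illustration only): drivers commonly send nullfunc
 * frames to signal power-save transitions; passing qos_ok == true
 * lets mac80211 emit a QoS variant when the AP supports WMM, as the
 * code above decides.  mydrv_tx_frame() is a hypothetical driver hook.
 *
 *	struct sk_buff *skb = ieee80211_nullfunc_get(hw, vif, true);
 *
 *	if (skb)
 *		mydrv_tx_frame(hw, skb);
 */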
|
|
|
|
|
2010-01-06 01:16:38 +07:00
|
|
|
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
|
2014-06-13 03:24:31 +07:00
|
|
|
const u8 *src_addr,
|
2010-01-06 01:16:38 +07:00
|
|
|
const u8 *ssid, size_t ssid_len,
|
2012-11-29 19:00:10 +07:00
|
|
|
size_t tailroom)
|
2010-01-06 01:16:38 +07:00
|
|
|
{
|
2014-06-13 03:24:31 +07:00
|
|
|
struct ieee80211_local *local = hw_to_local(hw);
|
2010-01-06 01:16:38 +07:00
|
|
|
struct ieee80211_hdr_3addr *hdr;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
size_t ie_ssid_len;
|
|
|
|
u8 *pos;
|
|
|
|
|
|
|
|
ie_ssid_len = 2 + ssid_len;
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
|
2012-11-29 19:00:10 +07:00
|
|
|
ie_ssid_len + tailroom);
|
2011-08-30 04:17:31 +07:00
|
|
|
if (!skb)
|
2010-01-06 01:16:38 +07:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
skb_reserve(skb, local->hw.extra_tx_headroom);
|
|
|
|
|
2017-06-16 19:29:19 +07:00
|
|
|
hdr = skb_put_zero(skb, sizeof(*hdr));
|
2010-01-06 01:16:38 +07:00
|
|
|
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
|
|
|
|
IEEE80211_STYPE_PROBE_REQ);
|
2012-07-13 21:23:07 +07:00
|
|
|
eth_broadcast_addr(hdr->addr1);
|
2014-06-13 03:24:31 +07:00
|
|
|
memcpy(hdr->addr2, src_addr, ETH_ALEN);
|
2012-07-13 21:23:07 +07:00
|
|
|
eth_broadcast_addr(hdr->addr3);
|
2010-01-06 01:16:38 +07:00
|
|
|
|
|
|
|
pos = skb_put(skb, ie_ssid_len);
|
|
|
|
*pos++ = WLAN_EID_SSID;
|
|
|
|
*pos++ = ssid_len;
|
2012-03-29 21:30:41 +07:00
|
|
|
if (ssid_len)
|
2010-01-06 01:16:38 +07:00
|
|
|
memcpy(pos, ssid, ssid_len);
|
|
|
|
pos += ssid_len;
|
|
|
|
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_probereq_get);
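/*
 * Usage sketch (illustration only): a driver building an offloaded
 * scan can request a probe request template, reserving tailroom for
 * extra IEs it appends itself.  The ssid, ssid_len, extra_ies and
 * extra_ies_len locals here are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb = ieee80211_probereq_get(hw, vif->addr, ssid, ssid_len,
 *				     extra_ies_len);
 *	if (skb)
 *		skb_put_data(skb, extra_ies, extra_ies_len);
 */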
|
|
|
|
|
2007-12-19 07:31:26 +07:00
|
|
|
void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
2007-07-27 20:43:22 +07:00
|
|
|
const void *frame, size_t frame_len,
|
2008-05-15 17:55:29 +07:00
|
|
|
const struct ieee80211_tx_info *frame_txctl,
|
2007-07-27 20:43:22 +07:00
|
|
|
struct ieee80211_rts *rts)
|
|
|
|
{
|
|
|
|
const struct ieee80211_hdr *hdr = frame;
|
|
|
|
|
2008-06-23 06:45:27 +07:00
|
|
|
rts->frame_control =
|
|
|
|
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
|
2007-12-19 07:31:26 +07:00
|
|
|
rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
|
|
|
|
frame_txctl);
|
2007-07-27 20:43:22 +07:00
|
|
|
memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
|
|
|
|
memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_rts_get);
|
|
|
|
|
2007-12-19 07:31:26 +07:00
|
|
|
void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
2007-07-27 20:43:22 +07:00
|
|
|
const void *frame, size_t frame_len,
|
2008-05-15 17:55:29 +07:00
|
|
|
const struct ieee80211_tx_info *frame_txctl,
|
2007-07-27 20:43:22 +07:00
|
|
|
struct ieee80211_cts *cts)
|
|
|
|
{
|
|
|
|
const struct ieee80211_hdr *hdr = frame;
|
|
|
|
|
2008-06-23 06:45:27 +07:00
|
|
|
cts->frame_control =
|
|
|
|
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
|
2007-12-19 07:31:26 +07:00
|
|
|
cts->duration = ieee80211_ctstoself_duration(hw, vif,
|
|
|
|
frame_len, frame_txctl);
|
2007-07-27 20:43:22 +07:00
|
|
|
memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_ctstoself_get);
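/*
 * Usage sketch (illustration only): both helpers fill in a protection
 * frame template for a data frame the driver is about to transmit, so
 * RTS or CTS-to-self can be generated in the driver or firmware.
 * "frame" is a hypothetical skb holding the protected data frame.
 *
 *	struct ieee80211_rts rts;
 *	struct ieee80211_cts cts;
 *
 *	ieee80211_rts_get(hw, vif, frame->data, frame->len,
 *			  IEEE80211_SKB_CB(frame), &rts);
 *	ieee80211_ctstoself_get(hw, vif, frame->data, frame->len,
 *				IEEE80211_SKB_CB(frame), &cts);
 */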
|
|
|
|
|
|
|
|
struct sk_buff *
|
2007-12-19 07:31:26 +07:00
|
|
|
ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
|
2008-05-15 17:55:29 +07:00
|
|
|
struct ieee80211_vif *vif)
|
2007-07-27 20:43:22 +07:00
|
|
|
{
|
|
|
|
struct ieee80211_local *local = hw_to_local(hw);
|
2008-05-27 21:50:51 +07:00
|
|
|
struct sk_buff *skb = NULL;
|
2008-02-25 22:27:43 +07:00
|
|
|
struct ieee80211_tx_data tx;
|
2007-07-27 20:43:22 +07:00
|
|
|
struct ieee80211_sub_if_data *sdata;
|
2012-10-11 02:39:50 +07:00
|
|
|
struct ps_data *ps;
|
2008-05-15 17:55:29 +07:00
|
|
|
struct ieee80211_tx_info *info;
|
2012-07-26 22:24:39 +07:00
|
|
|
struct ieee80211_chanctx_conf *chanctx_conf;
|
2007-07-27 20:43:22 +07:00
|
|
|
|
2007-12-19 07:31:26 +07:00
|
|
|
sdata = vif_to_sdata(vif);
|
2007-12-19 08:03:33 +07:00
|
|
|
|
|
|
|
rcu_read_lock();
|
2012-07-26 22:24:39 +07:00
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
2007-12-19 08:03:33 +07:00
|
|
|
|
2012-10-11 02:39:50 +07:00
|
|
|
if (!chanctx_conf)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP) {
|
|
|
|
struct beacon_data *beacon =
|
|
|
|
rcu_dereference(sdata->u.ap.beacon);
|
|
|
|
|
|
|
|
if (!beacon || !beacon->head)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ps = &sdata->u.ap.ps;
|
2013-01-31 00:14:08 +07:00
|
|
|
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
|
|
|
|
ps = &sdata->u.mesh.ps;
|
2012-10-11 02:39:50 +07:00
|
|
|
} else {
|
2008-05-27 21:50:51 +07:00
|
|
|
goto out;
|
2012-10-11 02:39:50 +07:00
|
|
|
}
|
2007-12-19 08:03:33 +07:00
|
|
|
|
2012-10-11 02:39:50 +07:00
|
|
|
if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
|
2008-05-27 21:50:51 +07:00
|
|
|
goto out; /* send buffered bc/mc only after DTIM beacon */
|
2008-05-15 17:55:29 +07:00
|
|
|
|
2007-07-27 20:43:22 +07:00
|
|
|
while (1) {
|
2012-10-11 02:39:50 +07:00
|
|
|
skb = skb_dequeue(&ps->bc_buf);
|
2007-07-27 20:43:22 +07:00
|
|
|
if (!skb)
|
2008-05-27 21:50:51 +07:00
|
|
|
goto out;
|
2007-07-27 20:43:22 +07:00
|
|
|
local->total_ps_buffered--;
|
|
|
|
|
2012-10-11 02:39:50 +07:00
|
|
|
if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
|
2007-07-27 20:43:22 +07:00
|
|
|
struct ieee80211_hdr *hdr =
|
|
|
|
(struct ieee80211_hdr *) skb->data;
|
|
|
|
/* more buffered multicast/broadcast frames ==> set
|
|
|
|
* MoreData flag in IEEE 802.11 header to inform PS
|
|
|
|
* STAs */
|
|
|
|
hdr->frame_control |=
|
|
|
|
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
|
|
|
|
}
|
|
|
|
|
2014-03-06 21:08:43 +07:00
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP)
|
2013-03-01 22:01:18 +07:00
|
|
|
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
|
2015-03-20 20:18:27 +07:00
|
|
|
if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
|
2007-07-27 20:43:22 +07:00
|
|
|
break;
|
2016-08-02 16:13:41 +07:00
|
|
|
ieee80211_free_txskb(hw, skb);
|
2007-07-27 20:43:22 +07:00
|
|
|
}
|
2008-05-15 17:55:29 +07:00
|
|
|
|
|
|
|
info = IEEE80211_SKB_CB(skb);
|
|
|
|
|
2008-02-25 22:27:43 +07:00
|
|
|
tx.flags |= IEEE80211_TX_PS_BUFFERED;
|
2012-11-09 17:39:59 +07:00
|
|
|
info->band = chanctx_conf->def.chan->band;
|
2007-07-27 20:43:22 +07:00
|
|
|
|
2008-06-20 06:22:30 +07:00
|
|
|
if (invoke_tx_handlers(&tx))
|
2007-07-27 20:43:22 +07:00
|
|
|
skb = NULL;
|
2008-06-20 06:22:30 +07:00
|
|
|
out:
|
2008-02-25 22:27:46 +07:00
|
|
|
rcu_read_unlock();
|
2007-07-27 20:43:22 +07:00
|
|
|
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_get_buffered_bc);
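/*
 * Usage sketch (illustration only): right after transmitting a DTIM
 * beacon, a driver drains all buffered broadcast/multicast frames by
 * calling this until it returns NULL.  mydrv_tx_frame() is a
 * hypothetical driver hook.
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = ieee80211_get_buffered_bc(hw, vif)))
 *		mydrv_tx_frame(hw, skb);
 */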
|
2009-06-17 22:43:56 +07:00
|
|
|
|
2014-11-19 18:47:38 +07:00
|
|
|
int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
|
|
|
|
{
|
|
|
|
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
|
|
|
|
struct ieee80211_sub_if_data *sdata = sta->sdata;
|
|
|
|
struct ieee80211_local *local = sdata->local;
|
|
|
|
int ret;
|
|
|
|
u32 queues;
|
|
|
|
|
|
|
|
lockdep_assert_held(&local->sta_mtx);
|
|
|
|
|
|
|
|
/* only some cases are supported right now */
|
|
|
|
switch (sdata->vif.type) {
|
|
|
|
case NL80211_IFTYPE_STATION:
|
|
|
|
case NL80211_IFTYPE_AP:
|
|
|
|
case NL80211_IFTYPE_AP_VLAN:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
WARN_ON(1);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (WARN_ON(tid >= IEEE80211_NUM_UPS))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (sta->reserved_tid == tid) {
|
|
|
|
ret = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) {
|
|
|
|
sdata_err(sdata, "TID reservation already active\n");
|
|
|
|
ret = -EALREADY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ieee80211_stop_vif_queues(sdata->local, sdata,
|
|
|
|
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
|
|
|
|
|
|
|
|
synchronize_net();
|
|
|
|
|
|
|
|
/* Tear down BA sessions so we stop aggregating on this TID */
|
2015-06-03 02:39:54 +07:00
|
|
|
if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
|
2014-11-19 18:47:38 +07:00
|
|
|
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
|
|
|
|
__ieee80211_stop_tx_ba_session(sta, tid,
|
|
|
|
AGG_STOP_LOCAL_REQUEST);
|
|
|
|
}
|
|
|
|
|
|
|
|
queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);
|
2015-01-07 20:42:39 +07:00
|
|
|
__ieee80211_flush_queues(local, sdata, queues, false);
|
2014-11-19 18:47:38 +07:00
|
|
|
|
|
|
|
sta->reserved_tid = tid;
|
|
|
|
|
|
|
|
ieee80211_wake_vif_queues(local, sdata,
|
|
|
|
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
|
|
|
|
|
2015-06-03 02:39:54 +07:00
|
|
|
if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION))
|
2014-11-19 18:47:38 +07:00
|
|
|
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_reserve_tid);
|
|
|
|
|
|
|
|
void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid)
|
|
|
|
{
|
|
|
|
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
|
|
|
|
struct ieee80211_sub_if_data *sdata = sta->sdata;
|
|
|
|
|
|
|
|
lockdep_assert_held(&sdata->local->sta_mtx);
|
|
|
|
|
|
|
|
/* only some cases are supported right now */
|
|
|
|
switch (sdata->vif.type) {
|
|
|
|
case NL80211_IFTYPE_STATION:
|
|
|
|
case NL80211_IFTYPE_AP:
|
|
|
|
case NL80211_IFTYPE_AP_VLAN:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
WARN_ON(1);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tid != sta->reserved_tid) {
|
|
|
|
sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_unreserve_tid);
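/*
 * Usage sketch (illustration only): a caller that needs a TID kept
 * free of aggregation brackets its use with this pair; per the
 * lockdep assertions above, both calls must run where mac80211's
 * sta_mtx is held.
 *
 *	if (ieee80211_reserve_tid(pubsta, tid) == 0) {
 *		... transmit the special traffic on tid ...
 *		ieee80211_unreserve_tid(pubsta, tid);
 *	}
 */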
|
|
|
|
|
2012-07-26 22:24:39 +07:00
|
|
|
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb, int tid,
|
2020-07-23 17:01:52 +07:00
|
|
|
enum nl80211_band band)
|
2009-06-17 22:43:56 +07:00
|
|
|
{
|
2017-01-24 22:42:10 +07:00
|
|
|
int ac = ieee80211_ac_from_tid(tid);
|
2012-04-03 21:28:50 +07:00
|
|
|
|
2016-03-03 08:16:56 +07:00
|
|
|
skb_reset_mac_header(skb);
|
2012-04-03 21:28:50 +07:00
|
|
|
skb_set_queue_mapping(skb, ac);
|
2011-12-15 16:18:34 +07:00
|
|
|
skb->priority = tid;
|
2010-01-06 00:00:58 +07:00
|
|
|
|
2013-02-13 21:39:57 +07:00
|
|
|
skb->dev = sdata->dev;
|
|
|
|
|
2009-06-18 22:25:11 +07:00
|
|
|
/*
|
|
|
|
* The other path calling ieee80211_xmit is from the tasklet,
|
|
|
|
* and while we can handle concurrent transmissions, the locking
|
|
|
|
* requirement is that we do not come into tx with bottom halves enabled.
|
|
|
|
*/
|
|
|
|
local_bh_disable();
|
2014-11-09 23:50:09 +07:00
|
|
|
IEEE80211_SKB_CB(skb)->band = band;
|
2020-07-23 17:01:52 +07:00
|
|
|
ieee80211_xmit(sdata, NULL, skb);
|
2009-06-18 22:25:11 +07:00
|
|
|
local_bh_enable();
|
2009-06-17 22:43:56 +07:00
|
|
|
}
|
2018-03-27 00:52:50 +07:00
|
|
|
|
|
|
|
int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
|
|
|
|
const u8 *buf, size_t len,
|
2020-05-08 21:42:00 +07:00
|
|
|
const u8 *dest, __be16 proto, bool unencrypted,
|
|
|
|
u64 *cookie)
|
2018-03-27 00:52:50 +07:00
|
|
|
{
|
|
|
|
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
|
struct ieee80211_local *local = sdata->local;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct ethhdr *ehdr;
|
2020-03-26 21:53:34 +07:00
|
|
|
u32 ctrl_flags = 0;
|
2020-05-27 23:03:34 +07:00
|
|
|
u32 flags = 0;
|
2018-03-27 00:52:50 +07:00
|
|
|
|
|
|
|
/* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
|
|
|
|
* or Pre-Authentication
|
|
|
|
*/
|
|
|
|
if (proto != sdata->control_port_protocol &&
|
|
|
|
proto != cpu_to_be16(ETH_P_PREAUTH))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-03-26 21:53:34 +07:00
|
|
|
if (proto == sdata->control_port_protocol)
|
2020-06-17 15:26:36 +07:00
|
|
|
ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
|
|
|
|
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
|
2020-03-26 21:53:34 +07:00
|
|
|
|
2018-03-27 00:52:50 +07:00
|
|
|
if (unencrypted)
|
2020-05-27 23:03:34 +07:00
|
|
|
flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
|
|
|
|
|
|
|
|
if (cookie)
|
|
|
|
ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
|
|
|
|
|
|
|
|
flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
|
|
|
|
IEEE80211_TX_CTL_INJECTED;
|
2018-03-27 00:52:50 +07:00
|
|
|
|
|
|
|
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
|
|
|
|
sizeof(struct ethhdr) + len);
|
|
|
|
if (!skb)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));
|
|
|
|
|
|
|
|
skb_put_data(skb, buf, len);
|
|
|
|
|
|
|
|
ehdr = skb_push(skb, sizeof(struct ethhdr));
|
|
|
|
memcpy(ehdr->h_dest, dest, ETH_ALEN);
|
2020-02-24 16:19:11 +07:00
|
|
|
memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
|
2018-03-27 00:52:50 +07:00
|
|
|
ehdr->h_proto = proto;
|
|
|
|
|
|
|
|
skb->dev = dev;
|
|
|
|
skb->protocol = htons(ETH_P_802_3);
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
skb_reset_mac_header(skb);
|
|
|
|
|
2020-05-27 23:03:34 +07:00
|
|
|
/* mutex lock is only needed for incrementing the cookie counter */
|
|
|
|
mutex_lock(&local->mtx);
|
|
|
|
|
2018-06-19 22:39:50 +07:00
|
|
|
local_bh_disable();
|
2020-05-27 23:03:34 +07:00
|
|
|
__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie);
|
2018-06-19 22:39:50 +07:00
|
|
|
local_bh_enable();
|
2018-03-27 00:52:50 +07:00
|
|
|
|
2020-05-27 23:03:34 +07:00
|
|
|
mutex_unlock(&local->mtx);
|
|
|
|
|
2018-03-27 00:52:50 +07:00
|
|
|
return 0;
|
|
|
|
}
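/*
 * Not exported: this implements the cfg80211 tx_control_port
 * operation, wired up in net/mac80211/cfg.c roughly as
 *
 *	.tx_control_port = ieee80211_tx_control_port,
 *
 * and reached via NL80211_CMD_CONTROL_PORT_FRAME, typically for EAPOL
 * frames when userspace handles the control port over nl80211.
 */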
|
2019-04-12 03:47:26 +07:00
|
|
|
|
|
|
|
int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
|
|
|
|
const u8 *buf, size_t len)
|
|
|
|
{
|
|
|
|
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
|
struct ieee80211_local *local = sdata->local;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len +
|
|
|
|
30 + /* header size */
|
|
|
|
18); /* 11s header size */
|
|
|
|
if (!skb)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
skb_reserve(skb, local->hw.extra_tx_headroom);
|
|
|
|
skb_put_data(skb, buf, len);
|
|
|
|
|
|
|
|
skb->dev = dev;
|
|
|
|
skb->protocol = htons(ETH_P_802_3);
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
skb_reset_mac_header(skb);
|
|
|
|
|
|
|
|
local_bh_disable();
|
|
|
|
__ieee80211_subif_start_xmit(skb, skb->dev, 0,
|
2020-05-27 23:03:34 +07:00
|
|
|
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP,
|
|
|
|
NULL);
|
2018-06-19 22:39:50 +07:00
|
|
|
local_bh_enable();
|
2018-03-27 00:52:50 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
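/*
 * Likewise not exported: this backs the cfg80211 probe_mesh_link
 * operation (also wired up in net/mac80211/cfg.c), used to test
 * whether a direct mesh peer is reachable by sending a data frame
 * straight to it, skipping mesh path lookup as the flag above shows.
 */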
|