Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem

Conflicts:
	drivers/net/wireless/iwlwifi/dvm/testmode.c
	drivers/net/wireless/iwlwifi/pcie/trans.c

commit b3c911eeb4
Author: John W. Linville
Date:   2012-06-19 14:41:22 -04:00

86 changed files with 4850 additions and 3073 deletions


@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
}
do {
register unsigned int iobase = info->p_dev->resource[0]->start;
register unsigned int offset;
register unsigned char command;
register unsigned long ready_bit;
unsigned int iobase = info->p_dev->resource[0]->start;
unsigned int offset;
unsigned char command;
unsigned long ready_bit;
register struct sk_buff *skb;
register int len;
int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));


@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
hdev->flush = bpa10x_flush;
hdev->send = bpa10x_send_frame;
set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
err = hci_register_dev(hdev);
if (err < 0) {


@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
return;
do {
register unsigned int iobase = info->p_dev->resource[0]->start;
unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
int len;
if (!pcmcia_dev_present(info->p_dev))
break;


@ -110,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
/* Marvell SD8787 Bluetooth AMP device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },


@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info)
}
do {
register unsigned int iobase = info->p_dev->resource[0]->start;
unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));


@ -21,15 +21,7 @@
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
@ -1026,7 +1018,7 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc = usb_ifnum_to_if(data->udev, 1);
if (!reset)
set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
@ -1038,7 +1030,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_CSR) {
@ -1046,7 +1038,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Old firmware would otherwise execute USB reset */
if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_SNIFFER) {


@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
}
do {
register unsigned int iobase = info->p_dev->resource[0]->start;
unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
register int len;
int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));


@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
static int bcsp_recv(struct hci_uart *hu, void *data, int count)
{
struct bcsp_struct *bcsp = hu->priv;
register unsigned char *ptr;
unsigned char *ptr;
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);


@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
static inline int h4_check_data_len(struct h4_struct *h4, int len)
{
register int room = skb_tailroom(h4->rx_skb);
int room = skb_tailroom(h4->rx_skb);
BT_DBG("len %d room %d", len, room);


@ -394,7 +394,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
hdev->dev_type = HCI_AMP;


@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
static inline int ll_check_data_len(struct ll_struct *ll, int len)
{
register int room = skb_tailroom(ll->rx_skb);
int room = skb_tailroom(ll->rx_skb);
BT_DBG("len %d room %d", len, room);
@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len)
static int ll_recv(struct hci_uart *hu, void *data, int count)
{
struct ll_struct *ll = hu->priv;
register char *ptr;
char *ptr;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
register int len, type, dlen;
int len, type, dlen;
BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);


@ -181,11 +181,14 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_cause = 0, async_cause;
u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
if (ath9k_hw_mci_is_enabled(ah))
async_mask |= AR_INTR_ASYNC_MASK_MCI;
async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
if (async_cause & async_mask) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);


@ -321,7 +321,7 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
(mci->bt_state != MCI_BT_SLEEP) &&
!mci->halted_bt_gpm) {
ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
@ -484,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 cur_bt_state;
cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);
if (mci->bt_state != cur_bt_state)
mci->bt_state = cur_bt_state;
@ -593,8 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
if (!time_out)
break;
offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
&more_data);
offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
if (offset == MCI_GPM_INVALID)
continue;
@ -658,8 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
time_out = 0;
while (more_data == MCI_GPM_MORE) {
offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
&more_data);
offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
if (offset == MCI_GPM_INVALID)
break;
@ -893,13 +891,16 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
udelay(100);
}
/* Check pending GPM msg before MCI Reset Rx */
ar9003_mci_check_gpm_offset(ah);
regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
udelay(1);
regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
ar9003_mci_get_next_gpm_offset(ah, true, NULL);
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
(SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
@ -1010,38 +1011,32 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
}
}
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
if (!mci->update_2g5g)
if (!mci->update_2g5g && !force)
return;
if (mci->is_2g) {
ar9003_mci_send_2g5g_status(ah, true);
ar9003_mci_send_lna_transfer(ah, true);
udelay(5);
REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
REG_SET_BIT(ah, AR_MCI_TX_CTRL,
AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
REG_SET_BIT(ah, AR_BTCOEX_CTRL,
AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
ar9003_mci_osla_setup(ah, true);
} else {
ar9003_mci_send_lna_take(ah, true);
udelay(5);
REG_SET_BIT(ah, AR_MCI_TX_CTRL,
AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
ar9003_mci_send_2g5g_status(ah, true);
ar9003_mci_osla_setup(ah, false);
if (!force)
ar9003_mci_send_2g5g_status(ah, true);
}
}
@ -1169,11 +1164,10 @@ void ar9003_mci_cleanup(struct ath_hw *ah)
}
EXPORT_SYMBOL(ar9003_mci_cleanup);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 value = 0, more_gpm = 0, gpm_ptr;
u32 value = 0;
u8 query_type;
switch (state_type) {
@ -1185,81 +1179,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
value = 0;
}
value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
break;
case MCI_STATE_INIT_GPM_OFFSET:
value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
mci->gpm_idx = value;
break;
case MCI_STATE_NEXT_GPM_OFFSET:
case MCI_STATE_LAST_GPM_OFFSET:
/*
* This could be useful to avoid new GPM message interrupt which
* may lead to spurious interrupt after power sleep, or multiple
* entry of ath_mci_intr().
* Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
* alleviate this effect, but clearing GPM RX interrupt bit is
* safe, because whether this is called from hw or driver code
* there must be an interrupt bit set/triggered initially
*/
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
AR_MCI_INTERRUPT_RX_MSG_GPM);
gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
value = gpm_ptr;
if (value == 0)
value = mci->gpm_len - 1;
else if (value >= mci->gpm_len) {
if (value != 0xFFFF)
value = 0;
} else {
value--;
}
if (value == 0xFFFF) {
value = MCI_GPM_INVALID;
more_gpm = MCI_GPM_NOMORE;
} else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
if (gpm_ptr == mci->gpm_idx) {
value = MCI_GPM_INVALID;
more_gpm = MCI_GPM_NOMORE;
} else {
for (;;) {
u32 temp_index;
/* skip reserved GPM if any */
if (value != mci->gpm_idx)
more_gpm = MCI_GPM_MORE;
else
more_gpm = MCI_GPM_NOMORE;
temp_index = mci->gpm_idx;
mci->gpm_idx++;
if (mci->gpm_idx >=
mci->gpm_len)
mci->gpm_idx = 0;
if (ar9003_mci_is_gpm_valid(ah,
temp_index)) {
value = temp_index;
break;
}
if (more_gpm == MCI_GPM_NOMORE) {
value = MCI_GPM_INVALID;
break;
}
}
}
if (p_data)
*p_data = more_gpm;
}
if (value != MCI_GPM_INVALID)
value <<= 4;
break;
case MCI_STATE_LAST_SCHD_MSG_OFFSET:
value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
@ -1272,21 +1191,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
AR_MCI_RX_REMOTE_SLEEP) ?
MCI_BT_SLEEP : MCI_BT_AWAKE;
break;
case MCI_STATE_CONT_RSSI_POWER:
value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
break;
case MCI_STATE_CONT_PRIORITY:
value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
break;
case MCI_STATE_CONT_TXRX:
value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
break;
case MCI_STATE_BT:
value = mci->bt_state;
break;
case MCI_STATE_SET_BT_SLEEP:
mci->bt_state = MCI_BT_SLEEP;
break;
case MCI_STATE_SET_BT_AWAKE:
mci->bt_state = MCI_BT_AWAKE;
ar9003_mci_send_coex_version_query(ah, true);
@ -1295,7 +1199,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
if (mci->unhalt_bt_gpm)
ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
ar9003_mci_2g5g_switch(ah, true);
ar9003_mci_2g5g_switch(ah, false);
break;
case MCI_STATE_SET_BT_CAL_START:
mci->bt_state = MCI_BT_CAL_START;
@ -1319,34 +1223,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
case MCI_STATE_SEND_WLAN_COEX_VERSION:
ar9003_mci_send_coex_version_response(ah, true);
break;
case MCI_STATE_SET_BT_COEX_VERSION:
if (!p_data)
ath_dbg(common, MCI,
"MCI Set BT Coex version with NULL data!!\n");
else {
mci->bt_ver_major = (*p_data >> 8) & 0xff;
mci->bt_ver_minor = (*p_data) & 0xff;
mci->bt_version_known = true;
ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
mci->bt_ver_major, mci->bt_ver_minor);
}
break;
case MCI_STATE_SEND_WLAN_CHANNELS:
if (p_data) {
if (((mci->wlan_channels[1] & 0xffff0000) ==
(*(p_data + 1) & 0xffff0000)) &&
(mci->wlan_channels[2] == *(p_data + 2)) &&
(mci->wlan_channels[3] == *(p_data + 3)))
break;
mci->wlan_channels[0] = *p_data++;
mci->wlan_channels[1] = *p_data++;
mci->wlan_channels[2] = *p_data++;
mci->wlan_channels[3] = *p_data++;
}
mci->wlan_channels_update = true;
ar9003_mci_send_coex_wlan_channels(ah, true);
break;
case MCI_STATE_SEND_VERSION_QUERY:
ar9003_mci_send_coex_version_query(ah, true);
break;
@ -1354,29 +1230,12 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
break;
case MCI_STATE_NEED_FLUSH_BT_INFO:
/*
* btcoex_hw.mci.unhalt_bt_gpm means whether it's
* needed to send UNHALT message. It's set whenever
* there's a request to send HALT message.
* mci_halted_bt_gpm means whether HALT message is sent
* out successfully.
*
* Checking (mci_unhalt_bt_gpm == false) instead of
* checking (ah->mci_halted_bt_gpm == false) will make
* sure currently is in UNHALT-ed mode and BT can
* respond to status query.
*/
value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
if (p_data)
mci->need_flush_btinfo = (*p_data != 0) ? true : false;
break;
case MCI_STATE_RECOVER_RX:
ar9003_mci_prep_interface(ah);
mci->query_bt = true;
mci->need_flush_btinfo = true;
ar9003_mci_send_coex_wlan_channels(ah, true);
ar9003_mci_2g5g_switch(ah, true);
ar9003_mci_2g5g_switch(ah, false);
break;
case MCI_STATE_NEED_FTP_STOMP:
value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
@ -1404,3 +1263,154 @@ void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
/* Force another 2g5g update at next scanning */
mci->update_2g5g = true;
}
void ar9003_mci_set_power_awake(struct ath_hw *ah)
{
u32 btcoex_ctrl2, diag_sw;
int i;
u8 lna_ctrl, bt_sleep;
for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
if (btcoex_ctrl2 != 0xdeadbeef)
break;
udelay(AH_TIME_QUANTUM);
}
REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));
for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
diag_sw = REG_READ(ah, AR_DIAG_SW);
if (diag_sw != 0xdeadbeef)
break;
udelay(AH_TIME_QUANTUM);
}
REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
REG_WRITE(ah, AR_DIAG_SW, diag_sw);
if (bt_sleep && (lna_ctrl == 2)) {
REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
udelay(50);
}
}
void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 offset;
/*
* This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
*/
offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
if (mci->gpm_idx == offset)
return;
ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
mci->gpm_idx, offset);
mci->query_bt = true;
mci->need_flush_btinfo = true;
mci->gpm_idx = 0;
}
u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 offset, more_gpm = 0, gpm_ptr;
if (first) {
gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
mci->gpm_idx = gpm_ptr;
return gpm_ptr;
}
/*
* This could be useful to avoid new GPM message interrupt which
* may lead to spurious interrupt after power sleep, or multiple
* entry of ath_mci_intr().
* Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
* alleviate this effect, but clearing GPM RX interrupt bit is
* safe, because whether this is called from hw or driver code
* there must be an interrupt bit set/triggered initially
*/
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
AR_MCI_INTERRUPT_RX_MSG_GPM);
gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
offset = gpm_ptr;
if (!offset)
offset = mci->gpm_len - 1;
else if (offset >= mci->gpm_len) {
if (offset != 0xFFFF)
offset = 0;
} else {
offset--;
}
if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
offset = MCI_GPM_INVALID;
more_gpm = MCI_GPM_NOMORE;
goto out;
}
for (;;) {
u32 temp_index;
/* skip reserved GPM if any */
if (offset != mci->gpm_idx)
more_gpm = MCI_GPM_MORE;
else
more_gpm = MCI_GPM_NOMORE;
temp_index = mci->gpm_idx;
mci->gpm_idx++;
if (mci->gpm_idx >= mci->gpm_len)
mci->gpm_idx = 0;
if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
offset = temp_index;
break;
}
if (more_gpm == MCI_GPM_NOMORE) {
offset = MCI_GPM_INVALID;
break;
}
}
if (offset != MCI_GPM_INVALID)
offset <<= 4;
out:
if (more)
*more = more_gpm;
return offset;
}
EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
mci->bt_ver_major = major;
mci->bt_ver_minor = minor;
mci->bt_version_known = true;
ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
mci->bt_ver_major, mci->bt_ver_minor);
}
EXPORT_SYMBOL(ar9003_mci_set_bt_version);
void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
mci->wlan_channels_update = true;
ar9003_mci_send_coex_wlan_channels(ah, true);
}
EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);


@ -189,26 +189,15 @@ enum mci_bt_state {
/* Type of state query */
enum mci_state_type {
MCI_STATE_ENABLE,
MCI_STATE_INIT_GPM_OFFSET,
MCI_STATE_NEXT_GPM_OFFSET,
MCI_STATE_LAST_GPM_OFFSET,
MCI_STATE_BT,
MCI_STATE_SET_BT_SLEEP,
MCI_STATE_SET_BT_AWAKE,
MCI_STATE_SET_BT_CAL_START,
MCI_STATE_SET_BT_CAL,
MCI_STATE_LAST_SCHD_MSG_OFFSET,
MCI_STATE_REMOTE_SLEEP,
MCI_STATE_CONT_RSSI_POWER,
MCI_STATE_CONT_PRIORITY,
MCI_STATE_CONT_TXRX,
MCI_STATE_RESET_REQ_WAKE,
MCI_STATE_SEND_WLAN_COEX_VERSION,
MCI_STATE_SET_BT_COEX_VERSION,
MCI_STATE_SEND_WLAN_CHANNELS,
MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY,
MCI_STATE_NEED_FLUSH_BT_INFO,
MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
@ -259,14 +248,15 @@ enum mci_gpm_coex_opcode {
bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
u32 *payload, u8 len, bool wait_done,
bool check_bt);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
u16 len, u32 sched_addr);
void ar9003_mci_cleanup(struct ath_hw *ah);
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
u32 *rx_msg_intr);
void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
/*
* These functions are used by ath9k_hw.
*/
@ -277,7 +267,7 @@ void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
void ar9003_mci_init_cal_done(struct ath_hw *ah);
void ar9003_mci_set_full_sleep(struct ath_hw *ah);
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force);
void ar9003_mci_check_bt(struct ath_hw *ah);
bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@ -285,6 +275,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
bool is_full_sleep);
void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
void ar9003_mci_set_power_awake(struct ath_hw *ah);
void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
#else
@ -322,6 +315,15 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
}
static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
{
}
static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
{
}
static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
{
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#endif


@ -52,7 +52,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},


@ -698,6 +698,7 @@ struct ath_softc {
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
struct ath_mci_coex mci_coex;
struct work_struct mci_work;
#endif
struct ath_descdma txsdma;


@ -202,7 +202,7 @@ static void ath_btcoex_period_timer(unsigned long data)
btcoex->bt_wait_time += btcoex->btcoex_period;
if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP, NULL) &&
if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
(mci->num_pan || mci->num_other_acl))
ah->btcoex_hw.mci.stomp_ftp =
(sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
@ -232,7 +232,7 @@ static void ath_btcoex_period_timer(unsigned long data)
}
ath9k_ps_restore(sc);
timer_period = btcoex->btcoex_period / 1000;
timer_period = btcoex->btcoex_period;
mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
}
@ -267,10 +267,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
btcoex->btcoex_period / 100;
btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
btcoex->btcoex_period / 100;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,


@ -1348,6 +1348,9 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
}
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_check_gpm_offset(ah);
REG_WRITE(ah, AR_RTC_RC, rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
@ -1708,7 +1711,7 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
ath9k_hw_start_nfcal(ah, true);
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_2g5g_switch(ah, true);
ar9003_mci_2g5g_switch(ah, false);
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
@ -1912,7 +1915,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_set_dma(ah);
REG_WRITE(ah, AR_OBS, 8);
if (!ath9k_hw_mci_is_enabled(ah))
REG_WRITE(ah, AR_OBS, 8);
if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
@ -2111,6 +2115,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
AR_RTC_FORCE_WAKE_EN);
udelay(50);
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_set_power_awake(ah);
for (i = POWER_UP_TIME / 50; i > 0; i--) {
val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
if (val == AR_RTC_STATUS_ON)


@ -136,6 +136,14 @@ void ath_hw_pll_work(struct work_struct *work)
u32 pll_sqsum;
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_pll_work.work);
/*
* ensure that the PLL WAR is executed only
* after the STA is associated (or) if the
* beaconing had started in interfaces that
* uses beacons.
*/
if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
return;
ath9k_ps_wakeup(sc);
pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);


@ -150,6 +150,9 @@ static void __ath_cancel_work(struct ath_softc *sc)
cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
cancel_work_sync(&sc->mci_work);
#endif
}
static void ath_cancel_work(struct ath_softc *sc)
@ -1033,15 +1036,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
}
}
if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
((vif->type == NL80211_IFTYPE_ADHOC) &&
sc->nvifs > 0)) {
ath_err(common, "Cannot create ADHOC interface when other"
" interfaces already exist.\n");
ret = -EINVAL;
goto out;
}
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@ -1066,15 +1060,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
/* See if new interface type is valid. */
if ((new_type == NL80211_IFTYPE_ADHOC) &&
(sc->nvifs > 1)) {
ath_err(common, "When using ADHOC, it must be the only"
" interface.\n");
ret = -EINVAL;
goto out;
}
if (ath9k_uses_beacons(new_type) &&
!ath9k_uses_beacons(vif->type)) {
if (sc->nbcnvifs >= ATH_BCBUF) {
@ -1258,6 +1243,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return -EINVAL;
}


@ -20,7 +20,7 @@
#include "ath9k.h"
#include "mci.h"
static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
static struct ath_mci_profile_info*
ath_mci_find_profile(struct ath_mci_profile *mci,
@ -28,11 +28,14 @@ ath_mci_find_profile(struct ath_mci_profile *mci,
{
struct ath_mci_profile_info *entry;
if (list_empty(&mci->info))
return NULL;
list_for_each_entry(entry, &mci->info, list) {
if (entry->conn_handle == info->conn_handle)
break;
return entry;
}
return entry;
return NULL;
}
static bool ath_mci_add_profile(struct ath_common *common,
@ -49,31 +52,21 @@ static bool ath_mci_add_profile(struct ath_common *common,
(info->type != MCI_GPM_COEX_PROFILE_VOICE))
return false;
entry = ath_mci_find_profile(mci, info);
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return false;
if (entry) {
memcpy(entry, info, 10);
} else {
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return false;
memcpy(entry, info, 10);
INC_PROF(mci, info);
list_add_tail(&info->list, &mci->info);
}
memcpy(entry, info, 10);
INC_PROF(mci, info);
list_add_tail(&entry->list, &mci->info);
return true;
}
static void ath_mci_del_profile(struct ath_common *common,
struct ath_mci_profile *mci,
struct ath_mci_profile_info *info)
struct ath_mci_profile_info *entry)
{
struct ath_mci_profile_info *entry;
entry = ath_mci_find_profile(mci, info);
if (!entry)
return;
@ -86,12 +79,16 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
{
struct ath_mci_profile_info *info, *tinfo;
mci->aggr_limit = 0;
if (list_empty(&mci->info))
return;
list_for_each_entry_safe(info, tinfo, &mci->info, list) {
list_del(&info->list);
DEC_PROF(mci, info);
kfree(info);
}
mci->aggr_limit = 0;
}
static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
@ -123,6 +120,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
goto skip_tuning;
btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
if (num_profile == 1) {
info = list_first_entry(&mci->info,
struct ath_mci_profile_info,
@ -181,12 +180,11 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
return;
btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0);
btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
btcoex->btcoex_period *= 1000;
btcoex->btcoex_no_stomp = btcoex->btcoex_period *
btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
(100 - btcoex->duty_cycle) / 100;
ath9k_hw_btcoex_enable(sc->sc_ah);
@ -197,20 +195,16 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
switch (opcode) {
case MCI_GPM_BT_CAL_REQ:
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
if (mci_hw->bt_state == MCI_BT_AWAKE) {
ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
} else {
ath_dbg(common, MCI, "MCI State mismatch: %d\n",
ar9003_mci_state(ah, MCI_STATE_BT, NULL));
}
break;
case MCI_GPM_BT_CAL_DONE:
ar9003_mci_state(ah, MCI_STATE_BT, NULL);
ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
break;
case MCI_GPM_BT_CAL_GRANT:
MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
@ -223,32 +217,42 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
}
}
static void ath9k_mci_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
ath_mci_update_scheme(sc);
}
static void ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &btcoex->mci;
struct ath_mci_profile_info *entry = NULL;
entry = ath_mci_find_profile(mci, info);
if (entry)
memcpy(entry, info, 10);
if (info->start) {
if (!ath_mci_add_profile(common, mci, info))
if (!entry && !ath_mci_add_profile(common, mci, info))
return;
} else
ath_mci_del_profile(common, mci, info);
ath_mci_del_profile(common, mci, entry);
btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
mci->aggr_limit = mci->num_sco ? 6 : 0;
if (NUM_PROF(mci)) {
btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
if (NUM_PROF(mci))
btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
} else {
else
btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
ATH_BTCOEX_STOMP_LOW;
btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
}
ath_mci_update_scheme(sc);
ieee80211_queue_work(sc->hw, &sc->mci_work);
}
static void ath_mci_process_status(struct ath_softc *sc,
@ -263,8 +267,6 @@ static void ath_mci_process_status(struct ath_softc *sc,
if (status->is_link)
return;
memset(&info, 0, sizeof(struct ath_mci_profile_info));
info.conn_handle = status->conn_handle;
if (ath_mci_find_profile(mci, &info))
return;
@ -284,7 +286,7 @@ static void ath_mci_process_status(struct ath_softc *sc,
} while (++i < ATH_MCI_MAX_PROFILE);
if (old_num_mgmt != mci->num_mgmt)
ath_mci_update_scheme(sc);
ieee80211_queue_work(sc->hw, &sc->mci_work);
}
static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@ -293,25 +295,20 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
struct ath_mci_profile_info profile_info;
struct ath_mci_profile_status profile_status;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u32 version;
u8 major;
u8 minor;
u8 major, minor;
u32 seq_num;
switch (opcode) {
case MCI_GPM_COEX_VERSION_QUERY:
version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION,
NULL);
ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
break;
case MCI_GPM_COEX_VERSION_RESPONSE:
major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
version = (major << 8) + minor;
version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION,
&version);
ar9003_mci_set_bt_version(ah, major, minor);
break;
case MCI_GPM_COEX_STATUS_QUERY:
ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL);
ar9003_mci_send_wlan_channels(ah);
break;
case MCI_GPM_COEX_BT_PROFILE_INFO:
memcpy(&profile_info,
@ -378,6 +375,7 @@ int ath_mci_setup(struct ath_softc *sc)
mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
mci->sched_buf.bf_paddr);
INIT_WORK(&sc->mci_work, ath9k_mci_work);
ath_dbg(common, MCI, "MCI Initialized\n");
return 0;
@ -405,6 +403,7 @@ void ath_mci_intr(struct ath_softc *sc)
struct ath_mci_coex *mci = &sc->mci_coex;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 mci_int, mci_int_rxmsg;
u32 offset, subtype, opcode;
u32 *pgpm;
@ -413,8 +412,8 @@ void ath_mci_intr(struct ath_softc *sc)
ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
ar9003_mci_get_next_gpm_offset(ah, true, NULL);
return;
}
@ -433,46 +432,41 @@ void ath_mci_intr(struct ath_softc *sc)
NULL, 0, true, false);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
/*
* always do this for recovery and 2G/5G toggling and LNA_TRANS
*/
ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
MCI_BT_SLEEP)
ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE,
NULL);
}
if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
(ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
MCI_BT_SLEEP))
ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
MCI_BT_AWAKE)
ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
NULL);
}
if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
(ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
MCI_BT_AWAKE))
mci_hw->bt_state = MCI_BT_SLEEP;
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
(mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
skip_gpm = true;
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
NULL);
offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
@ -481,8 +475,8 @@ void ath_mci_intr(struct ath_softc *sc)
while (more_data == MCI_GPM_MORE) {
pgpm = mci->gpm_buf.bf_addr;
offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
&more_data);
offset = ar9003_mci_get_next_gpm_offset(ah, false,
&more_data);
if (offset == MCI_GPM_INVALID)
break;
@ -523,23 +517,17 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
int value_dbm = ar9003_mci_state(ah,
MCI_STATE_CONT_RSSI_POWER, NULL);
int value_dbm = MS(mci_hw->cont_status,
AR_MCI_CONT_RSSI_POWER);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
ath_dbg(common, MCI,
"MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
ar9003_mci_state(ah,
MCI_STATE_CONT_PRIORITY, NULL),
value_dbm);
else
ath_dbg(common, MCI,
"MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
ar9003_mci_state(ah,
MCI_STATE_CONT_PRIORITY, NULL),
value_dbm);
ath_dbg(common, MCI,
"MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
"tx" : "rx",
MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
value_dbm);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)


@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate *rates = tx_info->control.rates;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
u8 try_per_rate, i = 0, rix, high_rix;
u8 try_per_rate, i = 0, rix;
int is_probe = 0;
if (rate_control_send_low(sta, priv_sta, txrc))
@ -791,7 +791,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
rate_table = ath_rc_priv->rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
&is_probe, false);
high_rix = rix;
/*
* If we're in HT mode and both us and our peer supports LDPC.
@ -839,16 +838,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
try_per_rate = 8;
/*
* Use a legacy rate as last retry to ensure that the frame
* is tried in both MCS and legacy rates.
* If the last rate in the rate series is MCS and has
* more than 80% of per thresh, then use a legacy rate
* as last retry to ensure that the frame is tried in both
* MCS and legacy rate.
*/
if ((rates[2].flags & IEEE80211_TX_RC_MCS) &&
(!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) ||
(ath_rc_priv->per[high_rix] > 45)))
ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
(ath_rc_priv->per[rix] > 45))
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
&is_probe, true);
else
ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
/* All other rates in the series have RTS enabled */
ath_rc_rate_set_series(rate_table, &rates[i], txrc,


@ -2098,8 +2098,8 @@ enum {
#define AR_MCI_CONT_STATUS 0x1848
#define AR_MCI_CONT_RSSI_POWER 0x000000FF
#define AR_MCI_CONT_RSSI_POWER_S 0
#define AR_MCI_CONT_RRIORITY 0x0000FF00
#define AR_MCI_CONT_RRIORITY_S 8
#define AR_MCI_CONT_PRIORITY 0x0000FF00
#define AR_MCI_CONT_PRIORITY_S 8
#define AR_MCI_CONT_TXRX 0x00010000
#define AR_MCI_CONT_TXRX_S 16


@ -34,3 +34,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
sdio_chip.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
dhd_dbg.o


@ -613,6 +613,9 @@ struct brcmf_pub {
struct work_struct multicast_work;
u8 macvalue[ETH_ALEN];
atomic_t pend_8021x_cnt;
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
};
struct brcmf_if_event {


@ -0,0 +1,126 @@
/*
* Copyright (c) 2012 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/ieee80211.h>
#include <linux/module.h>
#include <defs.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
static struct dentry *root_folder;
void brcmf_debugfs_init(void)
{
root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (IS_ERR(root_folder))
root_folder = NULL;
}
void brcmf_debugfs_exit(void)
{
if (!root_folder)
return;
debugfs_remove_recursive(root_folder);
root_folder = NULL;
}
int brcmf_debugfs_attach(struct brcmf_pub *drvr)
{
if (!root_folder)
return -ENODEV;
drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder);
return PTR_RET(drvr->dbgfs_dir);
}
void brcmf_debugfs_detach(struct brcmf_pub *drvr)
{
if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
debugfs_remove_recursive(drvr->dbgfs_dir);
}
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
{
return drvr->dbgfs_dir;
}
static
ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data,
size_t count, loff_t *ppos)
{
struct brcmf_sdio_count *sdcnt = f->private_data;
char buf[750];
int res;
/* only allow read from start */
if (*ppos > 0)
return 0;
res = scnprintf(buf, sizeof(buf),
"intrcount: %u\nlastintrs: %u\n"
"pollcnt: %u\nregfails: %u\n"
"tx_sderrs: %u\nfcqueued: %u\n"
"rxrtx: %u\nrx_toolong: %u\n"
"rxc_errors: %u\nrx_hdrfail: %u\n"
"rx_badhdr: %u\nrx_badseq: %u\n"
"fc_rcvd: %u\nfc_xoff: %u\n"
"fc_xon: %u\nrxglomfail: %u\n"
"rxglomframes: %u\nrxglompkts: %u\n"
"f2rxhdrs: %u\nf2rxdata: %u\n"
"f2txdata: %u\nf1regdata: %u\n"
"tickcnt: %u\ntx_ctlerrs: %lu\n"
"tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n"
"rx_ctlpkts: %lu\nrx_readahead: %lu\n",
sdcnt->intrcount, sdcnt->lastintrs,
sdcnt->pollcnt, sdcnt->regfails,
sdcnt->tx_sderrs, sdcnt->fcqueued,
sdcnt->rxrtx, sdcnt->rx_toolong,
sdcnt->rxc_errors, sdcnt->rx_hdrfail,
sdcnt->rx_badhdr, sdcnt->rx_badseq,
sdcnt->fc_rcvd, sdcnt->fc_xoff,
sdcnt->fc_xon, sdcnt->rxglomfail,
sdcnt->rxglomframes, sdcnt->rxglompkts,
sdcnt->f2rxhdrs, sdcnt->f2rxdata,
sdcnt->f2txdata, sdcnt->f1regdata,
sdcnt->tickcnt, sdcnt->tx_ctlerrs,
sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
return simple_read_from_buffer(data, count, ppos, buf, res);
}
static const struct file_operations brcmf_debugfs_sdio_counter_ops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = brcmf_debugfs_sdio_counter_read
};
void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
struct brcmf_sdio_count *sdcnt)
{
struct dentry *dentry = drvr->dbgfs_dir;
if (!IS_ERR_OR_NULL(dentry))
debugfs_create_file("counters", S_IRUGO, dentry,
sdcnt, &brcmf_debugfs_sdio_counter_ops);
}


@ -76,4 +76,63 @@ do { \
extern int brcmf_msg_level;
/*
* hold counter variables used in brcmfmac sdio driver.
*/
struct brcmf_sdio_count {
uint intrcount; /* Count of device interrupt callbacks */
uint lastintrs; /* Count as of last watchdog timer */
uint pollcnt; /* Count of active polls */
uint regfails; /* Count of R_REG failures */
uint tx_sderrs; /* Count of tx attempts with sd errors */
uint fcqueued; /* Tx packets that got queued */
uint rxrtx; /* Count of rtx requests (NAK to dongle) */
uint rx_toolong; /* Receive frames too long to receive */
uint rxc_errors; /* SDIO errors when reading control frames */
uint rx_hdrfail; /* SDIO errors on header reads */
uint rx_badhdr; /* Bad received headers (roosync?) */
uint rx_badseq; /* Mismatched rx sequence number */
uint fc_rcvd; /* Number of flow-control events received */
uint fc_xoff; /* Number which turned on flow-control */
uint fc_xon; /* Number which turned off flow-control */
uint rxglomfail; /* Failed deglom attempts */
uint rxglomframes; /* Number of glom frames (superframes) */
uint rxglompkts; /* Number of packets from glom frames */
uint f2rxhdrs; /* Number of header reads */
uint f2rxdata; /* Number of frame data reads */
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
uint tickcnt; /* Number of watchdog been schedule */
ulong tx_ctlerrs; /* Err of sending ctrl frames */
ulong tx_ctlpkts; /* Ctrl frames sent to dongle */
ulong rx_ctlerrs; /* Err of processing rx ctrl frames */
ulong rx_ctlpkts; /* Ctrl frames processed from dongle */
ulong rx_readahead_cnt; /* packets where header read-ahead was used */
};
struct brcmf_pub;
#ifdef DEBUG
void brcmf_debugfs_init(void);
void brcmf_debugfs_exit(void);
int brcmf_debugfs_attach(struct brcmf_pub *drvr);
void brcmf_debugfs_detach(struct brcmf_pub *drvr);
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
struct brcmf_sdio_count *sdcnt);
#else
static inline void brcmf_debugfs_init(void)
{
}
static inline void brcmf_debugfs_exit(void)
{
}
static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
{
return 0;
}
static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
{
}
#endif
#endif /* _BRCMF_DBG_H_ */


@ -1007,6 +1007,9 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
drvr->bus_if->drvr = drvr;
drvr->dev = dev;
/* create device debugfs folder */
brcmf_debugfs_attach(drvr);
/* Attach and link in the protocol */
ret = brcmf_proto_attach(drvr);
if (ret != 0) {
@ -1123,6 +1126,7 @@ void brcmf_detach(struct device *dev)
brcmf_proto_detach(drvr);
}
brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
}
@ -1192,6 +1196,8 @@ int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size)
static void brcmf_driver_init(struct work_struct *work)
{
brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_init();
#endif
@ -1219,6 +1225,7 @@ static void __exit brcmfmac_module_exit(void)
#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
#endif
brcmf_debugfs_exit();
}
module_init(brcmfmac_module_init);


@ -502,12 +502,9 @@ struct brcmf_sdio {
bool intr; /* Use interrupts */
bool poll; /* Use polling */
bool ipend; /* Device interrupt is pending */
uint intrcount; /* Count of device interrupt callbacks */
uint lastintrs; /* Count as of last watchdog timer */
uint spurious; /* Count of spurious interrupts */
uint pollrate; /* Ticks between device polls */
uint polltick; /* Tick counter */
uint pollcnt; /* Count of active polls */
#ifdef DEBUG
uint console_interval;
@ -515,8 +512,6 @@ struct brcmf_sdio {
uint console_addr; /* Console address from shared struct */
#endif /* DEBUG */
uint regfails; /* Count of R_REG failures */
uint clkstate; /* State of sd and backplane clock(s) */
bool activity; /* Activity flag for clock down */
s32 idletime; /* Control for activity timeout */
@ -531,33 +526,6 @@ struct brcmf_sdio {
/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
bool usebufpool;
/* Some additional counters */
uint tx_sderrs; /* Count of tx attempts with sd errors */
uint fcqueued; /* Tx packets that got queued */
uint rxrtx; /* Count of rtx requests (NAK to dongle) */
uint rx_toolong; /* Receive frames too long to receive */
uint rxc_errors; /* SDIO errors when reading control frames */
uint rx_hdrfail; /* SDIO errors on header reads */
uint rx_badhdr; /* Bad received headers (roosync?) */
uint rx_badseq; /* Mismatched rx sequence number */
uint fc_rcvd; /* Number of flow-control events received */
uint fc_xoff; /* Number which turned on flow-control */
uint fc_xon; /* Number which turned off flow-control */
uint rxglomfail; /* Failed deglom attempts */
uint rxglomframes; /* Number of glom frames (superframes) */
uint rxglompkts; /* Number of packets from glom frames */
uint f2rxhdrs; /* Number of header reads */
uint f2rxdata; /* Number of frame data reads */
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
uint tickcnt; /* Number of watchdog been schedule */
unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
unsigned long rx_readahead_cnt; /* Number of packets where header
* read-ahead was used. */
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
bool ctrl_frame_stat;
@ -583,6 +551,7 @@ struct brcmf_sdio {
u32 fw_ptr;
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
};
/* clkstate */
@ -945,7 +914,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
if (ret == 0)
w_sdreg32(bus, SMB_INT_ACK,
offsetof(struct sdpcmd_regs, tosbmailbox));
bus->f1regdata += 2;
bus->sdcnt.f1regdata += 2;
/* Dongle recomposed rx frames, accept them again */
if (hmb_data & HMB_DATA_NAKHANDLED) {
@ -984,12 +953,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
HMB_DATA_FCDATA_SHIFT;
if (fcbits & ~bus->flowcontrol)
bus->fc_xoff++;
bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
bus->fc_xon++;
bus->sdcnt.fc_xon++;
bus->fc_rcvd++;
bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
@ -1021,7 +990,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_RF_TERM, &err);
bus->f1regdata++;
bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
@ -1029,7 +998,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
SBSDIO_FUNC1_RFRAMEBCHI, &err);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_RFRAMEBCLO, &err);
bus->f1regdata += 2;
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@ -1047,11 +1016,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
if (rtx) {
bus->rxrtx++;
bus->sdcnt.rxrtx++;
err = w_sdreg32(bus, SMB_NAK,
offsetof(struct sdpcmd_regs, tosbmailbox));
bus->f1regdata++;
bus->sdcnt.f1regdata++;
if (err == 0)
bus->rxskip = true;
}
@ -1243,7 +1212,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
dlen);
errcode = -1;
}
bus->f2rxdata++;
bus->sdcnt.f2rxdata++;
/* On failure, kill the superframe, allow a couple retries */
if (errcode < 0) {
@ -1256,7 +1225,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
bus->rxglomfail++;
bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
return 0;
@ -1312,7 +1281,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (rxseq != seq) {
brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
seq, rxseq);
bus->rx_badseq++;
bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@ -1376,7 +1345,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
bus->rxglomfail++;
bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
bus->nextlen = 0;
@ -1402,7 +1371,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (rxseq != seq) {
brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
seq, rxseq);
bus->rx_badseq++;
bus->sdcnt.rx_badseq++;
rxseq = seq;
}
rxseq++;
@ -1441,8 +1410,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
down(&bus->sdsem);
}
bus->rxglomframes++;
bus->rxglompkts += bus->glom.qlen;
bus->sdcnt.rxglomframes++;
bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@ -1526,7 +1495,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
@ -1536,13 +1505,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
bus->sdiodev->sbwad,
SDIO_FUNC_2,
F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
bus->f2rxdata++;
bus->sdcnt.f2rxdata++;
/* Control frame failures need retransmission */
if (sdret < 0) {
brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
rdlen, sdret);
bus->rxc_errors++;
bus->sdcnt.rxc_errors++;
brcmf_sdbrcm_rxfail(bus, true, true);
goto done;
}
@ -1589,7 +1558,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
/* Read the entire frame */
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, *pkt);
bus->f2rxdata++;
bus->sdcnt.f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
@ -1630,7 +1599,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
if ((u16)~(*len ^ check)) {
brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
nextlen, *len, check);
bus->rx_badhdr++;
bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto fail;
}
@ -1746,7 +1715,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
bus->nextlen = 0;
}
bus->rx_readahead_cnt++;
bus->sdcnt.rx_readahead_cnt++;
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(
@ -1754,12 +1723,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (bus->flowcontrol != fcbits) {
if (~bus->flowcontrol & fcbits)
bus->fc_xoff++;
bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
bus->fc_xon++;
bus->sdcnt.fc_xon++;
bus->fc_rcvd++;
bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
@ -1767,7 +1736,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (rxseq != seq) {
brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
seq, rxseq);
bus->rx_badseq++;
bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@ -1814,11 +1783,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
BRCMF_FIRSTREAD);
bus->f2rxhdrs++;
bus->sdcnt.f2rxhdrs++;
if (sdret < 0) {
brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
bus->rx_hdrfail++;
bus->sdcnt.rx_hdrfail++;
brcmf_sdbrcm_rxfail(bus, true, true);
continue;
}
@ -1840,7 +1809,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if ((u16) ~(len ^ check)) {
brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
len, check);
bus->rx_badhdr++;
bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@ -1861,7 +1830,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if ((doff < SDPCM_HDRLEN) || (doff > len)) {
brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
doff, len, SDPCM_HDRLEN, seq);
bus->rx_badhdr++;
bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@ -1880,19 +1849,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (bus->flowcontrol != fcbits) {
if (~bus->flowcontrol & fcbits)
bus->fc_xoff++;
bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
bus->fc_xon++;
bus->sdcnt.fc_xon++;
bus->fc_rcvd++;
bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
/* Check and update sequence number */
if (rxseq != seq) {
brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
bus->rx_badseq++;
bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@ -1937,7 +1906,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
len, rdlen);
bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@ -1960,7 +1929,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
/* Read the remaining frame data */
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->f2rxdata++;
bus->sdcnt.f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
@ -2147,18 +2116,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->f2txdata++;
bus->sdcnt.f2txdata++;
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
bus->tx_sderrs++;
bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
bus->f1regdata++;
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@ -2166,7 +2135,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
@ -2224,7 +2193,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
ret = r_sdreg32(bus, &intstatus,
offsetof(struct sdpcmd_regs,
intstatus));
bus->f2txdata++;
bus->sdcnt.f2txdata++;
if (ret != 0)
break;
if (intstatus & bus->hostintmask)
@ -2417,7 +2386,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->ipend = false;
err = r_sdreg32(bus, &newstatus,
offsetof(struct sdpcmd_regs, intstatus));
bus->f1regdata++;
bus->sdcnt.f1regdata++;
if (err != 0)
newstatus = 0;
newstatus &= bus->hostintmask;
@ -2426,7 +2395,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err = w_sdreg32(bus, newstatus,
offsetof(struct sdpcmd_regs,
intstatus));
bus->f1regdata++;
bus->sdcnt.f1regdata++;
}
}
@ -2445,7 +2414,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err = r_sdreg32(bus, &newstatus,
offsetof(struct sdpcmd_regs, intstatus));
bus->f1regdata += 2;
bus->sdcnt.f1regdata += 2;
bus->fcstate =
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
intstatus |= (newstatus & bus->hostintmask);
@ -2510,13 +2479,13 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
bus->tx_sderrs++;
bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, &err);
bus->f1regdata++;
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@ -2526,7 +2495,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO,
&err);
bus->f1regdata += 2;
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
@ -2657,7 +2626,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
/* Check for existing queue, current flow-control,
pending event, or pending clock */
brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
bus->fcqueued++;
bus->sdcnt.fcqueued++;
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
@ -2845,13 +2814,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
/* On failure, abort the command and terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
bus->tx_sderrs++;
bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
bus->f1regdata++;
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@ -2859,7 +2828,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
bus->sdcnt.f1regdata += 2;
if (hi == 0 && lo == 0)
break;
}
@ -2976,13 +2945,26 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
up(&bus->sdsem);
if (ret)
bus->tx_ctlerrs++;
bus->sdcnt.tx_ctlerrs++;
else
bus->tx_ctlpkts++;
bus->sdcnt.tx_ctlpkts++;
return ret ? -EIO : 0;
}
#ifdef DEBUG
static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
{
struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
}
#else
static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
{
}
#endif /* DEBUG */
static int
brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
@ -3017,9 +2999,9 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
}
if (rxlen)
bus->rx_ctlpkts++;
bus->sdcnt.rx_ctlpkts++;
else
bus->rx_ctlerrs++;
bus->sdcnt.rx_ctlerrs++;
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
@ -3419,7 +3401,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
return 0;
/* Start the watchdog timer */
bus->tickcnt = 0;
bus->sdcnt.tickcnt = 0;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
down(&bus->sdsem);
@ -3512,7 +3494,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
/* Count the interrupt call */
bus->intrcount++;
bus->sdcnt.intrcount++;
bus->ipend = true;
/* Shouldn't get this interrupt if we're sleeping? */
@ -3554,7 +3536,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->polltick = 0;
/* Check device if no interrupts */
if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
if (!bus->intr ||
(bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
if (!bus->dpc_sched) {
u8 devpend;
@ -3569,7 +3552,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
/* If there is something, make like the ISR and
schedule the DPC */
if (intstatus) {
bus->pollcnt++;
bus->sdcnt.pollcnt++;
bus->ipend = true;
bus->dpc_sched = true;
@ -3581,7 +3564,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
}
/* Update interrupt tracking */
bus->lastintrs = bus->intrcount;
bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
}
#ifdef DEBUG
/* Poll for console output periodically */
@ -3793,7 +3776,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
bus->tickcnt++;
bus->sdcnt.tickcnt++;
} else
break;
}
@ -3834,7 +3817,6 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
if (bus) {
/* De-register interrupt handler */
brcmf_sdio_intr_unregister(bus->sdiodev);
@ -3938,6 +3920,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
goto fail;
}
brcmf_sdio_debugfs_create(bus);
brcmf_dbg(INFO, "completed!!\n");
/* if firmware path present try to download and bring up bus */
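The brcmfmac SDIO hunks above all make the same mechanical change: the per-bus statistics (f2rxhdrs, rx_badhdr, f1regdata, tx_sderrs, intrcount and friends) move from loose fields on the bus object into a single bus->sdcnt counter block, and brcmf_sdio_debugfs_create() hands that block to brcmf_debugfs_create_sdio_count() when DEBUG is set. Below is a minimal standalone sketch of that grouping pattern; the struct and function names are illustrative only, not the driver's real definitions.

#include <stdio.h>

/* Illustrative counter block, modeled on the bus->sdcnt grouping above. */
struct sdio_count {
	unsigned long intrcount;	/* device interrupts seen */
	unsigned long lastintrs;	/* snapshot taken by the watchdog */
	unsigned long rx_badhdr;	/* frames dropped on header checks */
	unsigned long tx_sderrs;	/* SDIO errors while transmitting */
};

struct sdio_bus {
	struct sdio_count sdcnt;	/* every statistic lives in one sub-struct */
};

/* One helper can now expose the whole block (debugfs in the real driver). */
static void dump_counters(const struct sdio_count *c)
{
	printf("intr=%lu badhdr=%lu txerr=%lu\n",
	       c->intrcount, c->rx_badhdr, c->tx_sderrs);
}

int main(void)
{
	struct sdio_bus bus = { { 0 } };

	bus.sdcnt.intrcount++;	/* e.g. in the interrupt handler */
	bus.sdcnt.rx_badhdr++;	/* e.g. on a failed hardware header check */
	dump_counters(&bus.sdcnt);
	return 0;
}

Grouping the fields means a single pointer can be passed to the debugfs helper instead of exporting each counter individually.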

View File

@ -735,10 +735,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
* a candidate for aggregation
*/
p = pktq_ppeek(&qi->q, prec);
/* tx_info must be checked with current p */
tx_info = IEEE80211_SKB_CB(p);
if (p) {
tx_info = IEEE80211_SKB_CB(p);
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
@ -759,6 +757,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
p = NULL;
continue;
}
/* next packet fit for aggregation so dequeue */
p = brcmu_pktq_pdeq(&qi->q, prec);
} else {
p = NULL;
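The ampdu hunk above moves the IEEE80211_SKB_CB(p) lookup inside the if (p) branch, so the control block is only read once pktq_ppeek() is known to have returned a packet. A compilable toy version of that peek-then-check ordering (types and names invented for the example):

#include <stdio.h>
#include <stddef.h>

struct pkt {
	int priority;
};

/* Stand-in for a queue peek that may return NULL when the queue is empty. */
static struct pkt *queue_peek(struct pkt *head)
{
	return head;
}

static void try_aggregate(struct pkt *head)
{
	struct pkt *p = queue_peek(head);

	if (p) {
		/* Only dereference the packet after the peek is known to
		 * have succeeded, mirroring the relocated tx_info lookup. */
		printf("candidate packet, priority %d\n", p->priority);
	} else {
		printf("queue empty, nothing to aggregate\n");
	}
}

int main(void)
{
	struct pkt one = { .priority = 3 };

	try_aggregate(&one);
	try_aggregate(NULL);
	return 0;
}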

View File

@ -721,14 +721,6 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
};
/*
* is called in brcms_bcma_probe() context, therefore no locking required.
*/
static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
{
return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
}
void brcms_dpc(unsigned long data)
{
struct brcms_info *wl;
@ -1068,9 +1060,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
"%d\n", __func__, err);
if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode))
wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
__func__, err);
if (wl->pub->srom_ccode[0] &&
regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
n_adapters_found++;
return wl;

View File

@ -1,7 +1,3 @@
obj-$(CONFIG_IWLDVM) += dvm/
CFLAGS_iwl-devtrace.o := -I$(src)
# common
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
iwlwifi-objs += iwl-io.o
@ -13,5 +9,11 @@ iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
CFLAGS_iwl-devtrace.o := -I$(src)

View File

@ -395,8 +395,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
}
extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
/* testmode support */
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
@ -404,13 +406,16 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
struct netlink_callback *cb,
void *data, int len);
extern void iwl_testmode_init(struct iwl_priv *priv);
extern void iwl_testmode_cleanup(struct iwl_priv *priv);
extern void iwl_testmode_free(struct iwl_priv *priv);
#else
static inline
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
return -ENOSYS;
}
static inline
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
@ -418,12 +423,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
{
return -ENOSYS;
}
static inline
void iwl_testmode_init(struct iwl_priv *priv)
static inline void iwl_testmode_init(struct iwl_priv *priv)
{
}
static inline
void iwl_testmode_cleanup(struct iwl_priv *priv)
static inline void iwl_testmode_free(struct iwl_priv *priv)
{
}
#endif
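The header hunk above follows the usual compile-out pattern: with CONFIG_IWLWIFI_DEVICE_TESTMODE set, the real prototypes (now iwl_testmode_init/iwl_testmode_free) are declared; otherwise empty static inline stubs keep every caller compiling without #ifdefs at the call sites. A self-contained sketch of the same pattern, using a made-up MY_FEATURE switch:

#include <stdio.h>

/* #define MY_FEATURE 1 */	/* flip this to use the real implementation,
				 * which would live in its own .c file */
#ifdef MY_FEATURE
void feature_init(void);
void feature_free(void);
#else
static inline void feature_init(void) { }
static inline void feature_free(void) { }
#endif

int main(void)
{
	feature_init();		/* callers never need their own #ifdef */
	printf("driver running\n");
	feature_free();
	return 0;
}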

View File

@ -52,6 +52,8 @@
#include "rs.h"
#include "tt.h"
#include "iwl-test.h"
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
#define CT_KILL_THRESHOLD 114 /* in Celsius */
@ -596,24 +598,6 @@ struct iwl_lib_ops {
void (*temperature)(struct iwl_priv *priv);
};
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace {
u32 buff_size;
u32 total_size;
u32 num_chunks;
u8 *cpu_addr;
u8 *trace_addr;
dma_addr_t dma_addr;
bool trace_enabled;
};
struct iwl_testmode_mem {
u32 buff_size;
u32 num_chunks;
u8 *buff_addr;
bool read_in_progress;
};
#endif
struct iwl_wipan_noa_data {
struct rcu_head rcu_head;
u32 length;
@ -670,8 +654,6 @@ struct iwl_priv {
enum ieee80211_band band;
u8 valid_contexts;
void (*pre_rx_handler)(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb);
int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@ -895,9 +877,9 @@ struct iwl_priv {
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace testmode_trace;
struct iwl_testmode_mem testmode_mem;
struct iwl_test tst;
u32 tm_fixed_rate;
#endif

View File

@ -1265,7 +1265,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
* the mutex, this ensures we don't try to send two
* (or more) synchronous commands at a time.
*/
if (cmd->flags & CMD_SYNC)
if (!(cmd->flags & CMD_ASYNC))
lockdep_assert_held(&priv->mutex);
if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
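The hcmd.c hunk changes the lockdep assertion from "flag CMD_SYNC is set" to "flag CMD_ASYNC is not set", which treats synchronous as the default whenever the caller did not explicitly ask for an async command. A tiny sketch of that inverted flag test (the flag value is made up for the example):

#include <stdio.h>

#define CMD_ASYNC	(1 << 0)	/* illustrative flag value */

static void send_cmd(unsigned int flags)
{
	/* Synchronous is the default: enforce the locking rule whenever
	 * the caller did NOT request an asynchronous command. */
	if (!(flags & CMD_ASYNC))
		printf("sync command: caller must hold the mutex\n");
	else
		printf("async command: no locking requirement here\n");
}

int main(void)
{
	send_cmd(0);		/* no flags -> treated as synchronous */
	send_cmd(CMD_ASYNC);
	return 0;
}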

View File

@ -476,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
}
if (priv->wowlan_sram)
_iwl_read_targ_mem_words(
_iwl_read_targ_mem_dwords(
priv->trans, 0x800000,
priv->wowlan_sram,
img->sec[IWL_UCODE_SECTION_DATA].len / 4);

View File

@ -406,7 +406,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
base = priv->device_pointers.log_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read));
iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
capacity = read.capacity;
mode = read.mode;
num_wraps = read.wrap_counter;
@ -1548,7 +1548,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
iwl_dbgfs_unregister(priv);
iwl_testmode_cleanup(priv);
iwl_testmode_free(priv);
iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
@ -1671,7 +1671,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
/*TODO: Update dbgfs with ISR error stats obtained below */
iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");

View File

@ -1124,8 +1124,6 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
void (*pre_rx_handler)(struct iwl_priv *,
struct iwl_rx_cmd_buffer *);
int err = 0;
/*
@ -1135,19 +1133,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
*/
iwl_notification_wait_notify(&priv->notif_wait, pkt);
/* RX data may be forwarded to userspace (using pre_rx_handler) in one
* of two cases: the first, that the user owns the uCode through
* testmode - in such case the pre_rx_handler is set and no further
* processing takes place. The other case is when the user want to
* monitor the rx w/o affecting the regular flow - the pre_rx_handler
* will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
/*
* RX data may be forwarded to userspace in one
* of two cases: the user owns the fw through testmode or when
* the user requested to monitor the rx w/o affecting the regular flow.
* In these cases the iwl_test object will handle forwarding the rx
* data to user space.
* Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
* continues.
* We need to use ACCESS_ONCE to prevent a case where the handler
* changes between the check and the call.
*/
pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler);
if (pre_rx_handler)
pre_rx_handler(priv, rxb);
iwl_test_rx(&priv->tst, rxb);
#endif
if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
/* Based on type of command response or notification,
* handle those that need handling via function in

View File

@ -60,6 +60,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@ -69,355 +70,84 @@
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/netlink.h>
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-prph.h"
#include "dev.h"
#include "agn.h"
#include "testmode.h"
#include "iwl-test.h"
#include "iwl-testmode.h"
static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
struct iwl_host_cmd *cmd)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return iwl_dvm_send_cmd(priv, cmd);
}
/* Periphery registers absolute lower bound. This is used in order to
* differentiate registery access through HBUS_TARG_PRPH_* and
* HBUS_TARG_MEM_* accesses.
*/
#define IWL_TM_ABS_PRPH_START (0xA00000)
static bool iwl_testmode_valid_hw_addr(u32 addr)
{
if (iwlagn_hw_valid_rtc_data_addr(addr))
return true;
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
* through the NL80211_CMD_TESTMODE channel regulated by nl80211.
* See testmode.h
*/
static
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
addr < IWLAGN_RTC_INST_UPPER_BOUND)
return true;
[IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
[IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
return false;
}
[IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
[IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
[IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return priv->fw->ucode_ver;
}
[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
static struct sk_buff*
iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
}
[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
return cfg80211_testmode_reply(skb);
}
[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
int len)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
GFP_ATOMIC);
}
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
return cfg80211_testmode_event(skb, GFP_ATOMIC);
}
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
[IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
[IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
static struct iwl_test_ops tst_ops = {
.send_cmd = iwl_testmode_send_cmd,
.valid_hw_addr = iwl_testmode_valid_hw_addr,
.get_fw_ver = iwl_testmode_get_fw_ver,
.alloc_reply = iwl_testmode_alloc_reply,
.reply = iwl_testmode_reply,
.alloc_event = iwl_testmode_alloc_event,
.event = iwl_testmode_event,
};
/*
* See the struct iwl_rx_packet in commands.h for the format of the
* received events from the device
*/
static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (pkt)
return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
else
return 0;
}
/*
* This function multicasts the spontaneous messages from the device to the
* user space. It is invoked whenever there is a received messages
* from the device. This function is called within the ISR of the rx handlers
* in iwlagn driver.
*
* The parsing of the message content is left to the user space application,
* The message content is treated as unattacked raw data and is encapsulated
* with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space.
*
* @priv: the instance of iwlwifi device
* @rxb: pointer to rx data content received by the ISR
*
* See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
* For the messages multicasting to the user application, the mandatory
* TLV fields are :
* IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
* IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
*/
static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb;
void *data;
int length;
data = rxb_addr(rxb);
length = get_event_length(rxb);
if (!data || length == 0)
return;
skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
GFP_ATOMIC);
if (skb == NULL) {
IWL_ERR(priv,
"Run out of memory for messages to user space ?\n");
return;
}
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
/* the length doesn't include len_n_flags field, so add it manually */
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
goto nla_put_failure;
cfg80211_testmode_event(skb, GFP_ATOMIC);
return;
nla_put_failure:
kfree_skb(skb);
IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n");
}
void iwl_testmode_init(struct iwl_priv *priv)
{
priv->pre_rx_handler = NULL;
priv->testmode_trace.trace_enabled = false;
priv->testmode_mem.read_in_progress = false;
iwl_test_init(&priv->tst, priv->trans, &tst_ops);
}
static void iwl_mem_cleanup(struct iwl_priv *priv)
void iwl_testmode_free(struct iwl_priv *priv)
{
if (priv->testmode_mem.read_in_progress) {
kfree(priv->testmode_mem.buff_addr);
priv->testmode_mem.buff_addr = NULL;
priv->testmode_mem.buff_size = 0;
priv->testmode_mem.num_chunks = 0;
priv->testmode_mem.read_in_progress = false;
}
iwl_test_free(&priv->tst);
}
static void iwl_trace_cleanup(struct iwl_priv *priv)
{
if (priv->testmode_trace.trace_enabled) {
if (priv->testmode_trace.cpu_addr &&
priv->testmode_trace.dma_addr)
dma_free_coherent(priv->trans->dev,
priv->testmode_trace.total_size,
priv->testmode_trace.cpu_addr,
priv->testmode_trace.dma_addr);
priv->testmode_trace.trace_enabled = false;
priv->testmode_trace.cpu_addr = NULL;
priv->testmode_trace.trace_addr = NULL;
priv->testmode_trace.dma_addr = 0;
priv->testmode_trace.buff_size = 0;
priv->testmode_trace.total_size = 0;
}
}
void iwl_testmode_cleanup(struct iwl_priv *priv)
{
iwl_trace_cleanup(priv);
iwl_mem_cleanup(priv);
}
/*
* This function handles the user application commands to the ucode.
*
* It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
* IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the
* host command to the ucode.
*
* If any mandatory field is missing, -ENOMSG is replied to the user space
* application; otherwise, waits for the host command to be sent and checks
* the return code. In case or error, it is returned, otherwise a reply is
* allocated and the reply RX packet
* is returned.
*
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_host_cmd cmd;
struct iwl_rx_packet *pkt;
struct sk_buff *skb;
void *reply_buf;
u32 reply_len;
int ret;
bool cmd_want_skb;
memset(&cmd, 0, sizeof(struct iwl_host_cmd));
if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
!tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
IWL_ERR(priv, "Missing ucode command mandatory fields\n");
return -ENOMSG;
}
cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
if (cmd_want_skb)
cmd.flags |= CMD_WANT_SKB;
cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret) {
IWL_ERR(priv, "Failed to send hcmd\n");
return ret;
}
if (!cmd_want_skb)
return ret;
/* Handling return of SKB to the user */
pkt = cmd.resp_pkt;
if (!pkt) {
IWL_ERR(priv, "HCMD received a null response packet\n");
return ret;
}
reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
reply_buf = kmalloc(reply_len, GFP_KERNEL);
if (!skb || !reply_buf) {
kfree_skb(skb);
kfree(reply_buf);
return -ENOMEM;
}
/* The reply is in a page, that we cannot send to user space. */
memcpy(reply_buf, &(pkt->hdr), reply_len);
iwl_free_resp(&cmd);
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
goto nla_put_failure;
return cfg80211_testmode_reply(skb);
nla_put_failure:
IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
return -ENOMSG;
}
/*
* This function handles the user application commands for register access.
*
* It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
* handlers respectively.
*
* If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the
* mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32,
* IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating
* the success of the command execution.
*
* If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
* value is returned with IWL_TM_ATTR_REG_VALUE32.
*
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
u32 ofs, val32, cmd;
u8 val8;
struct sk_buff *skb;
int status = 0;
if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
IWL_ERR(priv, "Missing register offset\n");
return -ENOMSG;
}
ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
/* Allow access only to FH/CSR/HBUS in direct mode.
Since we don't have the upper bounds for the CSR and HBUS segments,
we will use only the upper bound of FH for sanity check. */
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
(ofs >= FH_MEM_UPPER_BOUND)) {
IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
FH_MEM_UPPER_BOUND);
return -EINVAL;
}
switch (cmd) {
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read_direct32(priv->trans, ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
if (!skb) {
IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
goto nla_put_failure;
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
IWL_ERR(priv, "Missing value to write\n");
return -ENOMSG;
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
iwl_write_direct32(priv->trans, ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
IWL_ERR(priv, "Missing value to write\n");
return -ENOMSG;
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
iwl_write8(priv->trans, ofs, val8);
}
break;
default:
IWL_ERR(priv, "Unknown testmode register command ID\n");
return -ENOSYS;
}
return status;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
{
struct iwl_notification_wait calib_wait;
@ -469,7 +199,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
struct sk_buff *skb;
unsigned char *rsp_data_ptr = NULL;
int status = 0, rsp_data_len = 0;
u32 devid, inst_size = 0, data_size = 0;
u32 inst_size = 0, data_size = 0;
const struct fw_img *img;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
@ -563,39 +293,6 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
IWL_INFO(priv, "uCode version raw: 0x%x\n",
priv->fw->ucode_ver);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
if (!skb) {
IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
priv->fw->ucode_ver))
goto nla_put_failure;
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
devid = priv->trans->hw_id;
IWL_INFO(priv, "hw version: 0x%x\n", devid);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
if (!skb) {
IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
goto nla_put_failure;
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
if (!skb) {
@ -630,125 +327,6 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
return -EMSGSIZE;
}
/*
* This function handles the user application commands for uCode trace
*
* It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
* handlers respectively.
*
* If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
* value of the actual command execution is replied to the user application.
*
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct sk_buff *skb;
int status = 0;
struct device *dev = priv->trans->dev;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
if (priv->testmode_trace.trace_enabled)
return -EBUSY;
if (!tb[IWL_TM_ATTR_TRACE_SIZE])
priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
else
priv->testmode_trace.buff_size =
nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
if (!priv->testmode_trace.buff_size)
return -EINVAL;
if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
return -EINVAL;
priv->testmode_trace.total_size =
priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
priv->testmode_trace.cpu_addr =
dma_alloc_coherent(dev,
priv->testmode_trace.total_size,
&priv->testmode_trace.dma_addr,
GFP_KERNEL);
if (!priv->testmode_trace.cpu_addr)
return -ENOMEM;
priv->testmode_trace.trace_enabled = true;
priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
priv->testmode_trace.cpu_addr, 0x100);
memset(priv->testmode_trace.trace_addr, 0x03B,
priv->testmode_trace.buff_size);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
sizeof(priv->testmode_trace.dma_addr) + 20);
if (!skb) {
IWL_ERR(priv, "Memory allocation fail\n");
iwl_trace_cleanup(priv);
return -ENOMEM;
}
if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
sizeof(priv->testmode_trace.dma_addr),
(u64 *)&priv->testmode_trace.dma_addr))
goto nla_put_failure;
status = cfg80211_testmode_reply(skb);
if (status < 0) {
IWL_ERR(priv, "Error sending msg : %d\n", status);
}
priv->testmode_trace.num_chunks =
DIV_ROUND_UP(priv->testmode_trace.buff_size,
DUMP_CHUNK_SIZE);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
iwl_trace_cleanup(priv);
break;
default:
IWL_ERR(priv, "Unknown testmode mem command ID\n");
return -ENOSYS;
}
return status;
nla_put_failure:
kfree_skb(skb);
if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
iwl_trace_cleanup(priv);
return -EMSGSIZE;
}
static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct netlink_callback *cb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int idx, length;
if (priv->testmode_trace.trace_enabled &&
priv->testmode_trace.trace_addr) {
idx = cb->args[4];
if (idx >= priv->testmode_trace.num_chunks)
return -ENOENT;
length = DUMP_CHUNK_SIZE;
if (((idx + 1) == priv->testmode_trace.num_chunks) &&
(priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
length = priv->testmode_trace.buff_size %
DUMP_CHUNK_SIZE;
if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
priv->testmode_trace.trace_addr +
(DUMP_CHUNK_SIZE * idx)))
goto nla_put_failure;
idx++;
cb->args[4] = idx;
return 0;
} else
return -EFAULT;
nla_put_failure:
return -ENOBUFS;
}
/*
* This function handles the user application switch ucode ownership.
*
@ -777,10 +355,10 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
if (owner == IWL_OWNERSHIP_DRIVER) {
priv->ucode_owner = owner;
priv->pre_rx_handler = NULL;
iwl_test_enable_notifications(&priv->tst, false);
} else if (owner == IWL_OWNERSHIP_TM) {
priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
priv->ucode_owner = owner;
iwl_test_enable_notifications(&priv->tst, true);
} else {
IWL_ERR(priv, "Invalid owner\n");
return -EINVAL;
@ -788,180 +366,6 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
return 0;
}
static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
{
struct iwl_trans *trans = priv->trans;
unsigned long flags;
int i;
if (size & 0x3)
return -EINVAL;
priv->testmode_mem.buff_size = size;
priv->testmode_mem.buff_addr =
kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL);
if (priv->testmode_mem.buff_addr == NULL)
return -ENOMEM;
/* Hard-coded periphery absolute address */
if (IWL_TM_ABS_PRPH_START <= addr &&
addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
spin_lock_irqsave(&trans->reg_lock, flags);
iwl_grab_nic_access(trans);
iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
addr | (3 << 24));
for (i = 0; i < size; i += 4)
*(u32 *)(priv->testmode_mem.buff_addr + i) =
iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
iwl_release_nic_access(trans);
spin_unlock_irqrestore(&trans->reg_lock, flags);
} else { /* target memory (SRAM) */
_iwl_read_targ_mem_words(trans, addr,
priv->testmode_mem.buff_addr,
priv->testmode_mem.buff_size / 4);
}
priv->testmode_mem.num_chunks =
DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE);
priv->testmode_mem.read_in_progress = true;
return 0;
}
static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
u32 size, unsigned char *buf)
{
struct iwl_trans *trans = priv->trans;
u32 val, i;
unsigned long flags;
if (IWL_TM_ABS_PRPH_START <= addr &&
addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
/* Periphery writes can be 1-3 bytes long, or DWORDs */
if (size < 4) {
memcpy(&val, buf, size);
spin_lock_irqsave(&trans->reg_lock, flags);
iwl_grab_nic_access(trans);
iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
(addr & 0x0000FFFF) |
((size - 1) << 24));
iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
iwl_release_nic_access(trans);
/* needed after consecutive writes w/o read */
mmiowb();
spin_unlock_irqrestore(&trans->reg_lock, flags);
} else {
if (size % 4)
return -EINVAL;
for (i = 0; i < size; i += 4)
iwl_write_prph(trans, addr+i,
*(u32 *)(buf+i));
}
} else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
(IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
_iwl_write_targ_mem_words(trans, addr, buf, size/4);
} else
return -EINVAL;
return 0;
}
/*
* This function handles the user application commands for SRAM data dump
*
* It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
* IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
*
* Several error will be retured, -EBUSY if the SRAM data retrieved by
* previous command has not been delivered to userspace, or -ENOMSG if
* the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE)
* are missing, or -ENOMEM if the buffer allocation fails.
*
* Otherwise 0 is replied indicating the success of the SRAM reading.
*
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw,
struct nlattr **tb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
u32 addr, size, cmd;
unsigned char *buf;
/* Both read and write should be blocked, for atomicity */
if (priv->testmode_mem.read_in_progress)
return -EBUSY;
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
IWL_ERR(priv, "Error finding memory offset address\n");
return -ENOMSG;
}
addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
IWL_ERR(priv, "Error finding size for memory reading\n");
return -ENOMSG;
}
size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ)
return iwl_testmode_indirect_read(priv, addr, size);
else {
if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
return -EINVAL;
buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
return iwl_testmode_indirect_write(priv, addr, size, buf);
}
}
static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct netlink_callback *cb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int idx, length;
if (priv->testmode_mem.read_in_progress) {
idx = cb->args[4];
if (idx >= priv->testmode_mem.num_chunks) {
iwl_mem_cleanup(priv);
return -ENOENT;
}
length = DUMP_CHUNK_SIZE;
if (((idx + 1) == priv->testmode_mem.num_chunks) &&
(priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
length = priv->testmode_mem.buff_size %
DUMP_CHUNK_SIZE;
if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
priv->testmode_mem.buff_addr +
(DUMP_CHUNK_SIZE * idx)))
goto nla_put_failure;
idx++;
cb->args[4] = idx;
return 0;
} else
return -EFAULT;
nla_put_failure:
return -ENOBUFS;
}
static int iwl_testmode_notifications(struct ieee80211_hw *hw,
struct nlattr **tb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
bool enable;
enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
if (enable)
priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
else
priv->pre_rx_handler = NULL;
return 0;
}
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
* invoke the corresponding handlers.
@ -987,32 +391,27 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int result;
result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
iwl_testmode_gnl_msg_policy);
if (result != 0) {
IWL_ERR(priv, "Error parsing the gnl message : %d\n", result);
result = iwl_test_parse(&priv->tst, tb, data, len);
if (result)
return result;
}
/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
if (!tb[IWL_TM_ATTR_COMMAND]) {
IWL_ERR(priv, "Missing testmode command type\n");
return -ENOMSG;
}
/* in case multiple accesses to the device happens */
mutex_lock(&priv->mutex);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
result = iwl_testmode_ucode(hw, tb);
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
result = iwl_testmode_reg(hw, tb);
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
case IWL_TM_CMD_APP2DEV_END_TRACE:
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
result = iwl_test_handle_cmd(&priv->tst, tb);
break;
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
@ -1020,45 +419,25 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
result = iwl_testmode_driver(hw, tb);
break;
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
case IWL_TM_CMD_APP2DEV_END_TRACE:
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
result = iwl_testmode_trace(hw, tb);
break;
case IWL_TM_CMD_APP2DEV_OWNERSHIP:
IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
result = iwl_testmode_ownership(hw, tb);
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
IWL_DEBUG_INFO(priv, "testmode indirect memory cmd "
"to driver\n");
result = iwl_testmode_indirect_mem(hw, tb);
break;
case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
IWL_DEBUG_INFO(priv, "testmode notifications cmd "
"to driver\n");
result = iwl_testmode_notifications(hw, tb);
break;
default:
IWL_ERR(priv, "Unknown testmode command\n");
result = -ENOSYS;
break;
}
mutex_unlock(&priv->mutex);
if (result)
IWL_ERR(priv, "Test cmd failed result=%d\n", result);
return result;
}
@ -1066,7 +445,6 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len)
{
struct nlattr *tb[IWL_TM_ATTR_MAX];
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int result;
u32 cmd;
@ -1075,39 +453,19 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
/* offset by 1 since commands start at 0 */
cmd = cb->args[3] - 1;
} else {
result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
iwl_testmode_gnl_msg_policy);
if (result) {
IWL_ERR(priv,
"Error parsing the gnl message : %d\n", result);
return result;
}
struct nlattr *tb[IWL_TM_ATTR_MAX];
result = iwl_test_parse(&priv->tst, tb, data, len);
if (result)
return result;
/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
if (!tb[IWL_TM_ATTR_COMMAND]) {
IWL_ERR(priv, "Missing testmode command type\n");
return -ENOMSG;
}
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
cb->args[3] = cmd + 1;
}
/* in case multiple accesses to the device happens */
mutex_lock(&priv->mutex);
switch (cmd) {
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
result = iwl_testmode_trace_dump(hw, skb, cb);
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
result = iwl_testmode_buffer_dump(hw, skb, cb);
break;
default:
result = -EINVAL;
break;
}
result = iwl_test_dump(&priv->tst, cmd, skb, cb);
mutex_unlock(&priv->mutex);
return result;
}
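Taken as a whole, the testmode.c rewrite above turns the dvm driver into a thin client of the shared test core: the driver fills a struct iwl_test_ops with callbacks (send_cmd, valid_hw_addr, get_fw_ver, alloc_reply, reply, alloc_event, event), hands it to iwl_test_init(), and forwards most IWL_TM_CMD_* cases to iwl_test_handle_cmd()/iwl_test_dump(). Below is a minimal standalone model of that ops-table handoff; all names are invented for illustration.

#include <stdio.h>

/* Callbacks the generic test core asks the driver to provide. */
struct test_ops {
	int  (*send_cmd)(void *drv_priv, int cmd_id);
	unsigned int (*get_fw_ver)(void *drv_priv);
};

/* Generic test object: knows the ops table, not the driver internals. */
struct test_core {
	void *drv_priv;
	const struct test_ops *ops;
};

static void test_init(struct test_core *tst, void *drv_priv,
		      const struct test_ops *ops)
{
	tst->drv_priv = drv_priv;
	tst->ops = ops;
}

static int test_handle_cmd(struct test_core *tst, int cmd_id)
{
	printf("core: fw version 0x%x\n", tst->ops->get_fw_ver(tst->drv_priv));
	return tst->ops->send_cmd(tst->drv_priv, cmd_id);
}

/* Driver side: concrete implementations of the callbacks. */
static int drv_send_cmd(void *drv_priv, int cmd_id)
{
	(void)drv_priv;
	printf("driver: sending command 0x%x\n", cmd_id);
	return 0;
}

static unsigned int drv_get_fw_ver(void *drv_priv)
{
	(void)drv_priv;
	return 0x1234;
}

static const struct test_ops my_ops = {
	.send_cmd   = drv_send_cmd,
	.get_fw_ver = drv_get_fw_ver,
};

int main(void)
{
	struct test_core tst;

	test_init(&tst, NULL, &my_ops);	/* mirrors iwl_test_init(&priv->tst, ...) */
	return test_handle_cmd(&tst, 0x42);
}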

View File

@ -403,6 +403,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
/* From now on, we cannot access info->control */
spin_lock(&priv->sta_lock);
@ -1182,7 +1183,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
}
/*we can free until ssn % q.n_bd not inclusive */
WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
txq_id, ssn, &skbs));
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
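The tx.c hunk swaps WARN_ON() for WARN_ON_ONCE() around iwl_reclaim(), so a persistently failing reclaim produces one backtrace rather than flooding the log on every completion. A crude userspace imitation of that once-per-call-site behaviour (the kernel macro tracks this with its own per-site flag):

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for WARN_ON_ONCE(): report a condition only the first
 * time it is seen at this call site. */
#define warn_on_once(cond) do {						\
	static bool warned;						\
	if ((cond) && !warned) {					\
		warned = true;						\
		fprintf(stderr, "WARN once: %s\n", #cond);		\
	}								\
} while (0)

static int reclaim(int i)
{
	(void)i;
	return -1;		/* pretend reclaim keeps failing */
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		warn_on_once(reclaim(i) != 0);	/* prints a single line */
	return 0;
}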

View File

@ -131,6 +131,8 @@ struct iwl_drv {
#define DVM_OP_MODE 0
#define MVM_OP_MODE 1
/* Protects the table contents, i.e. the ops pointer & drv list */
static struct mutex iwlwifi_opmode_table_mtx;
static struct iwlwifi_opmode_table {
const char *name; /* name: iwldvm, iwlmvm, etc */
const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
@ -776,6 +778,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
const unsigned int api_min = drv->cfg->ucode_api_min;
u32 api_ver;
int i;
bool load_module = false;
fw->ucode_capa.max_probe_length = 200;
fw->ucode_capa.standard_phy_calibration_size =
@ -898,6 +901,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
mutex_lock(&iwlwifi_opmode_table_mtx);
op = &iwlwifi_opmode_table[DVM_OP_MODE];
/* add this device to the list of devices using this op_mode */
@ -907,11 +911,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
const struct iwl_op_mode_ops *ops = op->ops;
drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
if (!drv->op_mode)
if (!drv->op_mode) {
mutex_unlock(&iwlwifi_opmode_table_mtx);
goto out_unbind;
}
} else {
request_module_nowait("%s", op->name);
load_module = true;
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
/*
* Complete the firmware request last so that
@ -919,6 +926,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* are doing the start() above.
*/
complete(&drv->request_firmware_complete);
/*
* Load the module last so we don't block anything
* else from proceeding if the module fails to load
* or hangs loading.
*/
if (load_module)
request_module("%s", op->name);
return;
try_again:
@ -952,6 +967,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
drv->cfg = cfg;
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
ret = iwl_request_firmware(drv, true);
@ -974,6 +990,16 @@ void iwl_drv_stop(struct iwl_drv *drv)
iwl_dealloc_ucode(drv);
mutex_lock(&iwlwifi_opmode_table_mtx);
/*
* List is empty (this item wasn't added)
* when firmware loading failed -- in that
* case we can't remove it from any list.
*/
if (!list_empty(&drv->list))
list_del(&drv->list);
mutex_unlock(&iwlwifi_opmode_table_mtx);
kfree(drv);
}
@ -996,6 +1022,7 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
int i;
struct iwl_drv *drv;
mutex_lock(&iwlwifi_opmode_table_mtx);
for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
if (strcmp(iwlwifi_opmode_table[i].name, name))
continue;
@ -1003,8 +1030,10 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
drv->op_mode = ops->start(drv->trans, drv->cfg,
&drv->fw);
mutex_unlock(&iwlwifi_opmode_table_mtx);
return 0;
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
return -EIO;
}
EXPORT_SYMBOL_GPL(iwl_opmode_register);
@ -1014,6 +1043,7 @@ void iwl_opmode_deregister(const char *name)
int i;
struct iwl_drv *drv;
mutex_lock(&iwlwifi_opmode_table_mtx);
for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
if (strcmp(iwlwifi_opmode_table[i].name, name))
continue;
@ -1026,8 +1056,10 @@ void iwl_opmode_deregister(const char *name)
drv->op_mode = NULL;
}
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
return;
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
}
EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
@ -1035,6 +1067,8 @@ static int __init iwl_drv_init(void)
{
int i;
mutex_init(&iwlwifi_opmode_table_mtx);
for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
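The iwl-drv.c hunks add two related fixes: every walk or update of iwlwifi_opmode_table (and of each entry's drv list) now happens under the new iwlwifi_opmode_table_mtx, and the synchronous request_module() call is deferred until after that mutex is dropped so a slow or hanging module load cannot stall other users of the table. A small pthread-based sketch of the "decide under the lock, do the slow work outside it" shape; all names are invented.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER;
static const char *registered_name;	/* toy one-entry "ops table" */

static void slow_module_load(const char *name)
{
	sleep(1);			/* stands in for request_module() */
	printf("loaded %s\n", name);
}

static void register_device(const char *name)
{
	bool need_load = false;

	pthread_mutex_lock(&table_mtx);
	if (!registered_name) {
		registered_name = name;
		need_load = true;	/* remember, but don't load yet */
	}
	pthread_mutex_unlock(&table_mtx);

	/* Do the slow, possibly-blocking work only after dropping the
	 * lock, so other registrations are not held up by it. */
	if (need_load)
		slow_module_load(name);
}

int main(void)
{
	register_device("iwldvm");
	return 0;
}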

View File

@ -421,6 +421,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
/* Instruct FH to increment the retry count of a packet when
* it is brought from the memory to TX-FIFO
*/

View File

@ -298,8 +298,8 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
}
EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words)
void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
unsigned long flags;
int offs;
@ -308,26 +308,26 @@ void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
for (offs = 0; offs < words; offs++)
for (offs = 0; offs < dwords; offs++)
vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
iwl_release_nic_access(trans);
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_words);
EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
{
u32 value;
_iwl_read_targ_mem_words(trans, addr, &value, 1);
_iwl_read_targ_mem_dwords(trans, addr, &value, 1);
return value;
}
EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words)
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
unsigned long flags;
int offs, result = 0;
@ -336,7 +336,7 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
for (offs = 0; offs < words; offs++)
for (offs = 0; offs < dwords; offs++)
iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
iwl_release_nic_access(trans);
} else
@ -345,10 +345,10 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
return result;
}
EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_words);
EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
{
return _iwl_write_targ_mem_words(trans, addr, &val, 1);
return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
}
EXPORT_SYMBOL_GPL(iwl_write_targ_mem);

View File

@ -76,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words);
void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \
#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
_iwl_read_targ_mem_words(trans, addr, buf, \
(bufsize) / sizeof(u32));\
_iwl_read_targ_mem_dwords(trans, addr, buf, \
(bufsize) / sizeof(u32));\
} while (0)
int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words);
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
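The iwl-io rename makes the unit explicit: the low-level helpers now take a count of 32-bit dwords (_iwl_read/_write_targ_mem_dwords), while the convenience macro takes a byte size, checks at compile time that it is a whole number of dwords, and divides by sizeof(u32). A standalone sketch of that wrapper, using a generic compile-time assertion in place of the kernel's BUILD_BUG_ON; the helper names and fake data are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Poor man's BUILD_BUG_ON: compilation fails if cond is non-zero. */
#define COMPILE_ASSERT(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

/* Low-level helper works in 32-bit dwords, like _iwl_read_targ_mem_dwords. */
static void read_mem_dwords(uint32_t addr, void *buf, int dwords)
{
	uint32_t *vals = buf;

	for (int i = 0; i < dwords; i++)
		vals[i] = addr + i;	/* fake data instead of register reads */
}

/* Byte-sized wrapper: reject sizes that are not a multiple of 4 at
 * compile time, then convert bytes to dwords. */
#define read_mem_bytes(addr, buf, bufsize) do {				\
	COMPILE_ASSERT((bufsize) % sizeof(uint32_t));			\
	read_mem_dwords(addr, buf, (bufsize) / sizeof(uint32_t));	\
} while (0)

int main(void)
{
	uint32_t log[4];

	read_mem_bytes(0x800000, log, sizeof(log));	/* 16 bytes -> 4 dwords */
	printf("first dword: 0x%x\n", (unsigned)log[0]);
	return 0;
}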

View File

@ -0,0 +1,856 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/export.h>
#include <net/netlink.h>
#include "iwl-io.h"
#include "iwl-fh.h"
#include "iwl-prph.h"
#include "iwl-trans.h"
#include "iwl-test.h"
#include "iwl-csr.h"
#include "iwl-testmode.h"
/*
* Periphery registers absolute lower bound. This is used in order to
* differentiate registery access through HBUS_TARG_PRPH_* and
* HBUS_TARG_MEM_* accesses.
*/
#define IWL_ABS_PRPH_START (0xA00000)
/*
* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
* through the NL80211_CMD_TESTMODE channel regulated by nl80211.
* See iwl-testmode.h
*/
static
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
[IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
[IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
[IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
[IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
[IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
};
static inline void iwl_test_trace_clear(struct iwl_test *tst)
{
memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
}
static void iwl_test_trace_stop(struct iwl_test *tst)
{
if (!tst->trace.enabled)
return;
if (tst->trace.cpu_addr && tst->trace.dma_addr)
dma_free_coherent(tst->trans->dev,
tst->trace.tsize,
tst->trace.cpu_addr,
tst->trace.dma_addr);
iwl_test_trace_clear(tst);
}
static inline void iwl_test_mem_clear(struct iwl_test *tst)
{
memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
}
static inline void iwl_test_mem_stop(struct iwl_test *tst)
{
if (!tst->mem.in_read)
return;
iwl_test_mem_clear(tst);
}
/*
* Initializes the test object
* During the lifetime of the test object it is assumed that the transport is
* started. The test object should be stopped before the transport is stopped.
*/
void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
struct iwl_test_ops *ops)
{
tst->trans = trans;
tst->ops = ops;
iwl_test_trace_clear(tst);
iwl_test_mem_clear(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_init);
/*
* Stop the test object
*/
void iwl_test_free(struct iwl_test *tst)
{
iwl_test_mem_stop(tst);
iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
static inline int iwl_test_send_cmd(struct iwl_test *tst,
struct iwl_host_cmd *cmd)
{
return tst->ops->send_cmd(tst->trans->op_mode, cmd);
}
static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
{
return tst->ops->valid_hw_addr(addr);
}
static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
{
return tst->ops->get_fw_ver(tst->trans->op_mode);
}
static inline struct sk_buff*
iwl_test_alloc_reply(struct iwl_test *tst, int len)
{
return tst->ops->alloc_reply(tst->trans->op_mode, len);
}
static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
{
return tst->ops->reply(tst->trans->op_mode, skb);
}
static inline struct sk_buff*
iwl_test_alloc_event(struct iwl_test *tst, int len)
{
return tst->ops->alloc_event(tst->trans->op_mode, len);
}
static inline void
iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
{
return tst->ops->event(tst->trans->op_mode, skb);
}
/*
* This function handles the user application commands to the fw. The fw
* commands are sent in a synchronuous manner. In case that the user requested
* to get commands response, it is send to the user.
*/
static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
{
struct iwl_host_cmd cmd;
struct iwl_rx_packet *pkt;
struct sk_buff *skb;
void *reply_buf;
u32 reply_len;
int ret;
bool cmd_want_skb;
memset(&cmd, 0, sizeof(struct iwl_host_cmd));
if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
!tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
return -ENOMSG;
}
cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
if (cmd_want_skb)
cmd.flags |= CMD_WANT_SKB;
cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
cmd.id, cmd.flags, cmd.len[0]);
ret = iwl_test_send_cmd(tst, &cmd);
if (ret) {
IWL_ERR(tst->trans, "Failed to send hcmd\n");
return ret;
}
if (!cmd_want_skb)
return ret;
/* Handling return of SKB to the user */
pkt = cmd.resp_pkt;
if (!pkt) {
IWL_ERR(tst->trans, "HCMD received a null response packet\n");
return ret;
}
reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
skb = iwl_test_alloc_reply(tst, reply_len + 20);
reply_buf = kmalloc(reply_len, GFP_KERNEL);
if (!skb || !reply_buf) {
kfree_skb(skb);
kfree(reply_buf);
return -ENOMEM;
}
/* The reply is in a page, that we cannot send to user space. */
memcpy(reply_buf, &(pkt->hdr), reply_len);
iwl_free_resp(&cmd);
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
goto nla_put_failure;
return iwl_test_reply(tst, skb);
nla_put_failure:
IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
kfree(reply_buf);
kfree_skb(skb);
return -ENOMSG;
}
/*
* Handles the user application commands for register access.
*/
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
u32 ofs, val32, cmd;
u8 val8;
struct sk_buff *skb;
int status = 0;
struct iwl_trans *trans = tst->trans;
if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
IWL_ERR(trans, "Missing reg offset\n");
return -ENOMSG;
}
ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
/*
* Allow access only to FH/CSR/HBUS in direct mode.
* Since we don't have the upper bounds for the CSR and HBUS segments,
* we will use only the upper bound of FH as a sanity check.
*/
if (ofs >= FH_MEM_UPPER_BOUND) {
IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
FH_MEM_UPPER_BOUND);
return -EINVAL;
}
switch (cmd) {
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read_direct32(tst->trans, ofs);
IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
skb = iwl_test_alloc_reply(tst, 20);
if (!skb) {
IWL_ERR(trans, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(trans, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
IWL_ERR(trans, "Missing value to write\n");
return -ENOMSG;
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
iwl_write_direct32(tst->trans, ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
IWL_ERR(trans, "Missing value to write\n");
return -ENOMSG;
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
iwl_write8(tst->trans, ofs, val8);
}
break;
default:
IWL_ERR(trans, "Unknown test register cmd ID\n");
return -ENOMSG;
}
return status;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Handles the request to start FW tracing. Allocates the trace buffer
* and sends a reply to user space with the address of the allocated buffer.
*/
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
struct sk_buff *skb;
int status = 0;
if (tst->trace.enabled)
return -EBUSY;
if (!tb[IWL_TM_ATTR_TRACE_SIZE])
tst->trace.size = TRACE_BUFF_SIZE_DEF;
else
tst->trace.size =
nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
if (!tst->trace.size)
return -EINVAL;
if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
tst->trace.size > TRACE_BUFF_SIZE_MAX)
return -EINVAL;
tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
tst->trace.tsize,
&tst->trace.dma_addr,
GFP_KERNEL);
if (!tst->trace.cpu_addr)
return -ENOMEM;
tst->trace.enabled = true;
tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
if (!skb) {
IWL_ERR(tst->trans, "Memory allocation fail\n");
iwl_test_trace_stop(tst);
return -ENOMEM;
}
if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
sizeof(tst->trace.dma_addr),
(u64 *)&tst->trace.dma_addr))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
DUMP_CHUNK_SIZE);
return status;
nla_put_failure:
kfree_skb(skb);
if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
iwl_test_trace_stop(tst);
return -EMSGSIZE;
}
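/*
 * Illustrative note on the sizing above: the buffer is over-allocated by
 * TRACE_BUFF_PADD so that the 0x100-aligned trace_addr still leaves
 * tst->trace.size usable bytes; e.g. a cpu_addr ending in 0x1038 is
 * rounded up by PTR_ALIGN() to a trace_addr ending in 0x1100.
 */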
/*
* Handles indirect read from the periphery or the SRAM. The read is performed
* to a temporary buffer. The user space application should later issue a dump
* command in order to retrieve the data.
*/
static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
{
struct iwl_trans *trans = tst->trans;
unsigned long flags;
int i;
if (size & 0x3)
return -EINVAL;
tst->mem.size = size;
tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
if (tst->mem.addr == NULL)
return -ENOMEM;
/* Hard-coded periphery absolute address */
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
spin_lock_irqsave(&trans->reg_lock, flags);
iwl_grab_nic_access(trans);
iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
addr | (3 << 24));
for (i = 0; i < size; i += 4)
*(u32 *)(tst->mem.addr + i) =
iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
iwl_release_nic_access(trans);
spin_unlock_irqrestore(&trans->reg_lock, flags);
} else { /* target memory (SRAM) */
_iwl_read_targ_mem_dwords(trans, addr,
tst->mem.addr,
tst->mem.size / 4);
}
tst->mem.nchunks =
DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
tst->mem.in_read = true;
return 0;
}
/*
* Handles indirect write to the periphery or the SRAM. The data to write is
* taken from the buffer supplied by the user application.
*/
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
u32 size, unsigned char *buf)
{
struct iwl_trans *trans = tst->trans;
u32 val, i;
unsigned long flags;
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
/* Periphery writes can be 1-3 bytes long, or DWORDs */
if (size < 4) {
memcpy(&val, buf, size);
spin_lock_irqsave(&trans->reg_lock, flags);
iwl_grab_nic_access(trans);
iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
(addr & 0x0000FFFF) |
((size - 1) << 24));
iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
iwl_release_nic_access(trans);
/* needed after consecutive writes w/o read */
mmiowb();
spin_unlock_irqrestore(&trans->reg_lock, flags);
} else {
if (size % 4)
return -EINVAL;
for (i = 0; i < size; i += 4)
iwl_write_prph(trans, addr+i,
*(u32 *)(buf+i));
}
} else if (iwl_test_valid_hw_addr(tst, addr)) {
_iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
} else {
return -EINVAL;
}
return 0;
}
/*
* Handles the user application commands for indirect read/write
* to/from the periphery or the SRAM.
*/
static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
{
u32 addr, size, cmd;
unsigned char *buf;
/* Both read and write should be blocked, for atomicity */
if (tst->mem.in_read)
return -EBUSY;
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
IWL_ERR(tst->trans, "Error finding memory offset address\n");
return -ENOMSG;
}
addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
IWL_ERR(tst->trans, "Error finding size for memory reading\n");
return -ENOMSG;
}
size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
return iwl_test_indirect_read(tst, addr, size);
} else {
if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
return -EINVAL;
buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
return iwl_test_indirect_write(tst, addr, size, buf);
}
}
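/*
 * Illustrative summary of the attributes handled above (nothing new, just
 * the flow restated): IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ needs
 * IWL_TM_ATTR_MEM_ADDR and IWL_TM_ATTR_BUFFER_SIZE, and the data is then
 * fetched in chunks with IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP;
 * ..._BUFFER_WRITE additionally needs IWL_TM_ATTR_BUFFER_DUMP carrying the
 * payload. Reads and DWORD periphery writes require a size that is a
 * multiple of 4.
 */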
/*
* Enable notifications to user space
*/
static int iwl_test_notifications(struct iwl_test *tst,
struct nlattr **tb)
{
tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
return 0;
}
/*
* Handles the request to get the device id
*/
static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
{
u32 devid = tst->trans->hw_id;
struct sk_buff *skb;
int status;
IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
skb = iwl_test_alloc_reply(tst, 20);
if (!skb) {
IWL_ERR(tst->trans, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
return 0;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Handles the request to get the FW version
*/
static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
{
struct sk_buff *skb;
int status;
u32 ver = iwl_test_fw_ver(tst);
IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
skb = iwl_test_alloc_reply(tst, 20);
if (!skb) {
IWL_ERR(tst->trans, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
return 0;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Parse the netlink message and validate that the IWL_TM_ATTR_COMMAND attribute exists
*/
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
void *data, int len)
{
int result;
result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
iwl_testmode_gnl_msg_policy);
if (result) {
IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
return result;
}
/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
if (!tb[IWL_TM_ATTR_COMMAND]) {
IWL_ERR(tst->trans, "Missing testmode command type\n");
return -ENOMSG;
}
return 0;
}
EXPORT_SYMBOL_GPL(iwl_test_parse);
/*
* Handle test commands.
* Returns 1 for unknown commands (not handled by the test object); negative
* value in case of error.
*/
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
{
int result;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
result = iwl_test_fw_cmd(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
result = iwl_test_reg(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
result = iwl_test_trace_begin(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
iwl_test_trace_stop(tst);
result = 0;
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
result = iwl_test_indirect_mem(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
result = iwl_test_notifications(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
result = iwl_test_get_fw_ver(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
result = iwl_test_get_dev_id(tst, tb);
break;
default:
IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
result = 1;
break;
}
return result;
}
EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
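/*
 * A minimal sketch (not part of this file) of how an op mode's testmode
 * entry point might chain iwl_test_parse() and iwl_test_handle_cmd().
 * The function name and the -EOPNOTSUPP fallback are assumptions; only
 * the return convention (1 = not handled here, < 0 = error) comes from
 * the helpers above.
 */
static int iwl_example_testmode_cmd(struct iwl_test *tst, void *data, int len)
{
	struct nlattr *tb[IWL_TM_ATTR_MAX];
	int err;

	err = iwl_test_parse(tst, tb, data, len);
	if (err)
		return err;

	err = iwl_test_handle_cmd(tst, tb);
	if (err > 0)
		/* Not a shared command: op-mode specific handling goes here */
		err = -EOPNOTSUPP;

	return err;
}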
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx, length;
if (!tst->trace.enabled || !tst->trace.trace_addr)
return -EFAULT;
idx = cb->args[4];
if (idx >= tst->trace.nchunks)
return -ENOENT;
length = DUMP_CHUNK_SIZE;
if (((idx + 1) == tst->trace.nchunks) &&
(tst->trace.size % DUMP_CHUNK_SIZE))
length = tst->trace.size %
DUMP_CHUNK_SIZE;
if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
goto nla_put_failure;
cb->args[4] = ++idx;
return 0;
nla_put_failure:
return -ENOBUFS;
}
static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx, length;
if (!tst->mem.in_read)
return -EFAULT;
idx = cb->args[4];
if (idx >= tst->mem.nchunks) {
iwl_test_mem_stop(tst);
return -ENOENT;
}
length = DUMP_CHUNK_SIZE;
if (((idx + 1) == tst->mem.nchunks) &&
(tst->mem.size % DUMP_CHUNK_SIZE))
length = tst->mem.size % DUMP_CHUNK_SIZE;
if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
goto nla_put_failure;
cb->args[4] = ++idx;
return 0;
nla_put_failure:
return -ENOBUFS;
}
/*
* Handle dump commands.
* Returns 1 for unknown commands (not handled by the test object); negative
* value in case of error.
*/
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
struct netlink_callback *cb)
{
int result;
switch (cmd) {
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
result = iwl_test_trace_dump(tst, skb, cb);
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
result = iwl_test_buffer_dump(tst, skb, cb);
break;
default:
result = 1;
break;
}
return result;
}
EXPORT_SYMBOL_GPL(iwl_test_dump);
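/*
 * A hedged sketch (not part of this file) of a netlink dump handler built
 * on iwl_test_dump(). The dump callback is invoked repeatedly; the chunk
 * index lives in cb->args[4] (advanced by the helpers above). Caching the
 * command in cb->args[3] between invocations and the function name are
 * assumptions of this example.
 */
static int iwl_example_testmode_dump(struct iwl_test *tst, u32 cmd,
				     struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	if (cb->args[3] == 0)
		cb->args[3] = cmd + 1;	/* remember cmd, avoid the 0 "unset" value */

	/* Emits one chunk per call; -ENOENT signals that the dump is done */
	return iwl_test_dump(tst, cb->args[3] - 1, skb, cb);
}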
/*
* Multicasts spontaneous messages from the device to user space.
*/
static void iwl_test_send_rx(struct iwl_test *tst,
struct iwl_rx_cmd_buffer *rxb)
{
struct sk_buff *skb;
struct iwl_rx_packet *data;
int length;
data = rxb_addr(rxb);
length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
/* the length doesn't include len_n_flags field, so add it manually */
length += sizeof(__le32);
skb = iwl_test_alloc_event(tst, length + 20);
if (skb == NULL) {
IWL_ERR(tst->trans, "Out of memory for message to user\n");
return;
}
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
goto nla_put_failure;
iwl_test_event(tst, skb);
return;
nla_put_failure:
kfree_skb(skb);
IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
}
/*
* Called whenever an Rx frame is received from the device. If notifications to
* user space were requested, the frame is forwarded to the user.
*/
void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
{
if (tst->notify)
iwl_test_send_rx(tst, rxb);
}
EXPORT_SYMBOL_GPL(iwl_test_rx);
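/*
 * A brief sketch (not part of this file) of where an op mode might hook
 * the test object into its Rx path; the dispatch function name is an
 * assumption. iwl_test_rx() is a no-op unless the user enabled
 * notifications via IWL_TM_CMD_APP2DEV_NOTIFICATIONS.
 */
static void iwl_example_rx_dispatch(struct iwl_test *tst,
				    struct iwl_rx_cmd_buffer *rxb)
{
	/* Offer the frame to user space first (cheap if notify is off) */
	iwl_test_rx(tst, rxb);

	/* ... regular op-mode handling of rxb continues here ... */
}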

View File

@ -0,0 +1,161 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __IWL_TEST_H__
#define __IWL_TEST_H__
#include <linux/types.h>
#include "iwl-trans.h"
struct iwl_test_trace {
u32 size;
u32 tsize;
u32 nchunks;
u8 *cpu_addr;
u8 *trace_addr;
dma_addr_t dma_addr;
bool enabled;
};
struct iwl_test_mem {
u32 size;
u32 nchunks;
u8 *addr;
bool in_read;
};
/*
* struct iwl_test_ops: callback to the op mode
*
* The structure defines the callbacks that the op_mode should handle,
* in order to handle logic that is out of the scope of iwl_test. The
* op_mode must set all the callbacks.
* @send_cmd: handler that is used by the test object to request the
* op_mode to send a command to the fw.
*
* @valid_hw_addr: handler that is used by the test object to request the
* op_mode to check if the given address is a valid address.
*
* @get_fw_ver: handler used to get the FW version.
*
* @alloc_reply: handler used by the test object to request the op_mode
* to allocate an skb for sending a reply to the user, and initialize
* the skb. It is assumed that the test object only fills the required
* attributes.
*
* @reply: handler used by the test object to request the op_mode to reply
* to a request. The skb is an skb previously allocated by the
* alloc_reply callback.
*
* @alloc_event: handler used by the test object to request the op_mode
* to allocate an skb for sending an event, and initialize
* the skb. It is assumed that the test object only fills the required
* attributes.
*
* @event: handler used by the test object to request the op_mode to send
* an event. The skb is an skb previously allocated by the
* alloc_event callback.
*/
struct iwl_test_ops {
int (*send_cmd)(struct iwl_op_mode *op_modes,
struct iwl_host_cmd *cmd);
bool (*valid_hw_addr)(u32 addr);
u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
};
struct iwl_test {
struct iwl_trans *trans;
struct iwl_test_ops *ops;
struct iwl_test_trace trace;
struct iwl_test_mem mem;
bool notify;
};
void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
struct iwl_test_ops *ops);
void iwl_test_free(struct iwl_test *tst);
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
void *data, int len);
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
struct netlink_callback *cb);
void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
static inline void iwl_test_enable_notifications(struct iwl_test *tst,
bool enable)
{
tst->notify = enable;
}
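/*
 * Usage sketch (illustrative; the callback implementations and the priv
 * layout are hypothetical - only the prototypes above are real):
 *
 *	static struct iwl_test_ops example_test_ops = {
 *		.send_cmd	= example_send_cmd,
 *		.valid_hw_addr	= example_valid_hw_addr,
 *		.get_fw_ver	= example_get_fw_ver,
 *		.alloc_reply	= example_alloc_reply,
 *		.reply		= example_reply,
 *		.alloc_event	= example_alloc_event,
 *		.event		= example_event,
 *	};
 *
 *	iwl_test_init(&priv->tst, trans, &example_test_ops);
 *	...
 *	iwl_test_free(&priv->tst);
 *
 * All callbacks must be set; the test object invokes them unconditionally.
 */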
#endif

View File

@ -258,6 +258,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true

View File

@ -339,16 +339,9 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, bool active);
void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,

View File

@ -298,6 +298,10 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
struct iwl_tx_queue *txq = (void *)data;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
u32 scd_sram_addr = trans_pcie->scd_base_addr +
SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
u8 buf[16];
int i;
spin_lock(&txq->lock);
/* check if triggered erroneously */
@ -307,15 +311,40 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
}
spin_unlock(&txq->lock);
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
jiffies_to_msecs(trans_pcie->wd_timeout));
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
& (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
iwl_read_targ_mem(trans,
trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(i));
if (i & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
else
tbl_dw = tbl_dw & 0x0000FFFF;
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
i, active ? "" : "in", fifo, tbl_dw,
iwl_read_prph(trans,
SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
}
iwl_op_mode_nic_error(trans->op_mode);
}
@ -1054,22 +1083,20 @@ static void iwl_tx_start(struct iwl_trans *trans)
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
int fifo = trans_pcie->setup_q_to_fifo[i];
__iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
IWL_TID_NON_QOS,
SCD_FRAME_LIMIT, 0);
}
/* Activate all Tx DMA/FIFO channels */
iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
int fifo = trans_pcie->setup_q_to_fifo[i];
iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);
}
/* Activate all Tx DMA/FIFO channels */
iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
@ -1239,6 +1266,19 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
/* In AGG mode, the index in the ring must correspond to the WiFi
* sequence number. This is a HW requirement to help the SCD parse
* the BA.
* Check here that the packets are in the right place on the ring.
*/
#ifdef CONFIG_IWLWIFI_DEBUG
wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
((wifi_seq & 0xff) != q->write_ptr),
"Q: %d WiFi Seq %d tfdNum %d",
txq_id, wifi_seq, q->write_ptr);
#endif
/* Set up driver data for this TFD */
txq->entries[q->write_ptr].skb = skb;
txq->entries[q->write_ptr].cmd = dev_cmd;
@ -1332,7 +1372,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
skb->data + hdr_len, secondlen);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
if (txq->need_update && q->read_ptr == q->write_ptr &&
trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */

View File

@ -380,8 +380,8 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
u16 txq_id)
static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
u16 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 tbl_dw_addr;
@ -405,7 +405,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
return 0;
}
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@ -415,46 +415,16 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index)
{
IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, bool active)
{
int txq_id = txq->q.id;
iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
SCD_QUEUE_STTS_REG_MSK);
if (active)
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
txq_id, tx_fifo_id);
else
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn)
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
lockdep_assert_held(&trans_pcie->irq_lock);
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
/* Stop this Tx queue before configuring it */
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
iwl_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD queue */
if (txq_id != trans_pcie->cmd_queue)
@ -465,17 +435,27 @@ void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
u16 ra_tid = BUILD_RAxTID(sta_id, tid);
/* Map receiver-address / traffic-ID to this queue */
iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
/* enable aggregations for the queue */
iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
} else {
/*
* disable aggregations for the queue, this will also make the
* ra_tid mapping configuration irrelevant since it is now a
* non-AGG queue.
*/
iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
}
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@ -488,43 +468,34 @@ void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
fifo, true);
}
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
__iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id,
tid, frame_limit, ssn);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
(1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
SCD_QUEUE_STTS_REG_MSK);
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
txq_id, fifo, ssn & 0xff);
}
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 rd_ptr, wr_ptr;
int n_bd = trans_pcie->txq[txq_id].q.n_bd;
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
txq_id, rd_ptr, wr_ptr);
trans_pcie->txq[txq_id].q.read_ptr = 0;
trans_pcie->txq[txq_id].q.write_ptr = 0;
iwl_trans_set_wr_ptrs(trans, txq_id, 0);
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
0, false);
iwl_txq_set_inactive(trans, txq_id);
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/

View File

@ -27,6 +27,17 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params) {
int i;
if (!params->privacy) {
bss_config->protocol = PROTOCOL_NO_SECURITY;
bss_config->key_mgmt = KEY_MGMT_NONE;
bss_config->wpa_cfg.length = 0;
priv->sec_info.wep_enabled = 0;
priv->sec_info.wpa_enabled = 0;
priv->sec_info.wpa2_enabled = 0;
return 0;
}
switch (params->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
bss_config->auth_mode = WLAN_AUTH_OPEN;

View File

@ -2110,7 +2110,7 @@ static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
matched) {
if (!ether_addr_equal(bssid->mac, match_bssid))
if (ether_addr_equal(bssid->mac, match_bssid))
*matched = true;
}

View File

@ -0,0 +1,126 @@
/*
Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
Copyright (c) 2011,2012 Intel Corp.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 and
only version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
#ifndef __A2MP_H
#define __A2MP_H
#include <net/bluetooth/l2cap.h>
#define A2MP_FEAT_EXT 0x8000
struct amp_mgr {
struct l2cap_conn *l2cap_conn;
struct l2cap_chan *a2mp_chan;
struct kref kref;
__u8 ident;
__u8 handle;
unsigned long flags;
};
struct a2mp_cmd {
__u8 code;
__u8 ident;
__le16 len;
__u8 data[0];
} __packed;
/* A2MP command codes */
#define A2MP_COMMAND_REJ 0x01
struct a2mp_cmd_rej {
__le16 reason;
__u8 data[0];
} __packed;
#define A2MP_DISCOVER_REQ 0x02
struct a2mp_discov_req {
__le16 mtu;
__le16 ext_feat;
} __packed;
struct a2mp_cl {
__u8 id;
__u8 type;
__u8 status;
} __packed;
#define A2MP_DISCOVER_RSP 0x03
struct a2mp_discov_rsp {
__le16 mtu;
__le16 ext_feat;
struct a2mp_cl cl[0];
} __packed;
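/* Illustrative sizing note: a Discover Response for two controllers
 * carries sizeof(struct a2mp_discov_rsp) + 2 * sizeof(struct a2mp_cl) =
 * 4 + 6 = 10 bytes of payload, so the preceding a2mp_cmd header has
 * code A2MP_DISCOVER_RSP (0x03) and len = 10.
 */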
#define A2MP_CHANGE_NOTIFY 0x04
#define A2MP_CHANGE_RSP 0x05
#define A2MP_GETINFO_REQ 0x06
struct a2mp_info_req {
__u8 id;
} __packed;
#define A2MP_GETINFO_RSP 0x07
struct a2mp_info_rsp {
__u8 id;
__u8 status;
__le32 total_bw;
__le32 max_bw;
__le32 min_latency;
__le16 pal_cap;
__le16 assoc_size;
} __packed;
#define A2MP_GETAMPASSOC_REQ 0x08
struct a2mp_amp_assoc_req {
__u8 id;
} __packed;
#define A2MP_GETAMPASSOC_RSP 0x09
struct a2mp_amp_assoc_rsp {
__u8 id;
__u8 status;
__u8 amp_assoc[0];
} __packed;
#define A2MP_CREATEPHYSLINK_REQ 0x0A
#define A2MP_DISCONNPHYSLINK_REQ 0x0C
struct a2mp_physlink_req {
__u8 local_id;
__u8 remote_id;
__u8 amp_assoc[0];
} __packed;
#define A2MP_CREATEPHYSLINK_RSP 0x0B
#define A2MP_DISCONNPHYSLINK_RSP 0x0D
struct a2mp_physlink_rsp {
__u8 local_id;
__u8 remote_id;
__u8 status;
} __packed;
/* A2MP response status */
#define A2MP_STATUS_SUCCESS 0x00
#define A2MP_STATUS_INVALID_CTRL_ID 0x01
#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
#define A2MP_STATUS_COLLISION_OCCURED 0x03
#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
#define A2MP_STATUS_SECURITY_VIOLATION 0x06
void amp_mgr_get(struct amp_mgr *mgr);
int amp_mgr_put(struct amp_mgr *mgr);
struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
struct sk_buff *skb);
#endif /* __A2MP_H */

View File

@ -1,4 +1,4 @@
/*
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
@ -12,22 +12,19 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#ifndef __BLUETOOTH_H
#define __BLUETOOTH_H
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <net/sock.h>
@ -168,8 +165,8 @@ typedef struct {
#define BDADDR_LE_PUBLIC 0x01
#define BDADDR_LE_RANDOM 0x02
#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
/* Copy, swap, convert BD Address */
static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
@ -215,7 +212,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags);
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags);
uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
@ -225,12 +222,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
struct l2cap_ctrl {
unsigned int sframe : 1,
poll : 1,
final : 1,
fcs : 1,
sar : 2,
super : 2;
unsigned int sframe:1,
poll:1,
final:1,
fcs:1,
sar:2,
super:2;
__u16 reqseq;
__u16 txseq;
__u8 retries;
@ -249,7 +246,8 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{
struct sk_buff *skb;
if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) {
skb = alloc_skb(len + BT_SKB_RESERVE, how);
if (skb) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
@ -261,7 +259,8 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
{
struct sk_buff *skb;
if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
if (skb) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}

View File

@ -30,6 +30,9 @@
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
#define HCI_LINK_KEY_SIZE 16
#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
/* HCI dev events */
#define HCI_DEV_REG 1
#define HCI_DEV_UNREG 2
@ -56,9 +59,12 @@
#define HCI_BREDR 0x00
#define HCI_AMP 0x01
/* First BR/EDR Controller shall have ID = 0 */
#define HCI_BREDR_ID 0
/* HCI device quirks */
enum {
HCI_QUIRK_NO_RESET,
HCI_QUIRK_RESET_ON_CLOSE,
HCI_QUIRK_RAW_DEVICE,
HCI_QUIRK_FIXUP_BUFFER_SIZE
};
@ -133,10 +139,8 @@ enum {
#define HCIINQUIRY _IOR('H', 240, int)
/* HCI timeouts */
#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
#define HCI_CMD_TIMEOUT (1000) /* 1 seconds */
#define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */
@ -371,7 +375,7 @@ struct hci_cp_reject_conn_req {
#define HCI_OP_LINK_KEY_REPLY 0x040b
struct hci_cp_link_key_reply {
bdaddr_t bdaddr;
__u8 link_key[16];
__u8 link_key[HCI_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
@ -523,6 +527,28 @@ struct hci_cp_io_capability_neg_reply {
__u8 reason;
} __packed;
#define HCI_OP_CREATE_PHY_LINK 0x0435
struct hci_cp_create_phy_link {
__u8 phy_handle;
__u8 key_len;
__u8 key_type;
__u8 key[HCI_AMP_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_ACCEPT_PHY_LINK 0x0436
struct hci_cp_accept_phy_link {
__u8 phy_handle;
__u8 key_len;
__u8 key_type;
__u8 key[HCI_AMP_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_DISCONN_PHY_LINK 0x0437
struct hci_cp_disconn_phy_link {
__u8 phy_handle;
__u8 reason;
} __packed;
#define HCI_OP_SNIFF_MODE 0x0803
struct hci_cp_sniff_mode {
__le16 handle;
@ -818,6 +844,31 @@ struct hci_rp_read_local_amp_info {
__le32 be_flush_to;
} __packed;
#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
struct hci_cp_read_local_amp_assoc {
__u8 phy_handle;
__le16 len_so_far;
__le16 max_len;
} __packed;
struct hci_rp_read_local_amp_assoc {
__u8 status;
__u8 phy_handle;
__le16 rem_len;
__u8 frag[0];
} __packed;
#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
struct hci_cp_write_remote_amp_assoc {
__u8 phy_handle;
__le16 len_so_far;
__le16 rem_len;
__u8 frag[0];
} __packed;
struct hci_rp_write_remote_amp_assoc {
__u8 status;
__u8 phy_handle;
} __packed;
#define HCI_OP_LE_SET_EVENT_MASK 0x2001
struct hci_cp_le_set_event_mask {
__u8 mask[8];
@ -1048,7 +1099,7 @@ struct hci_ev_link_key_req {
#define HCI_EV_LINK_KEY_NOTIFY 0x18
struct hci_ev_link_key_notify {
bdaddr_t bdaddr;
__u8 link_key[16];
__u8 link_key[HCI_LINK_KEY_SIZE];
__u8 key_type;
} __packed;
@ -1144,6 +1195,12 @@ struct extended_inquiry_info {
__u8 data[240];
} __packed;
#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
struct hci_ev_key_refresh_complete {
__u8 status;
__le16 handle;
} __packed;
#define HCI_EV_IO_CAPA_REQUEST 0x31
struct hci_ev_io_capa_request {
bdaddr_t bdaddr;
@ -1190,6 +1247,39 @@ struct hci_ev_le_meta {
__u8 subevent;
} __packed;
#define HCI_EV_PHY_LINK_COMPLETE 0x40
struct hci_ev_phy_link_complete {
__u8 status;
__u8 phy_handle;
} __packed;
#define HCI_EV_CHANNEL_SELECTED 0x41
struct hci_ev_channel_selected {
__u8 phy_handle;
} __packed;
#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
struct hci_ev_disconn_phy_link_complete {
__u8 status;
__u8 phy_handle;
__u8 reason;
} __packed;
#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
struct hci_ev_logical_link_complete {
__u8 status;
__le16 handle;
__u8 phy_handle;
__u8 flow_spec_id;
} __packed;
#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
struct hci_ev_disconn_logical_link_complete {
__u8 status;
__le16 handle;
__u8 reason;
} __packed;
#define HCI_EV_NUM_COMP_BLOCKS 0x48
struct hci_comp_blocks_info {
__le16 handle;
@ -1290,7 +1380,6 @@ struct hci_sco_hdr {
__u8 dlen;
} __packed;
#include <linux/skbuff.h>
static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
{
return (struct hci_event_hdr *) skb->data;
@ -1307,12 +1396,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
}
/* Command opcode pack/unpack */
#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10))
#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
#define hci_opcode_ocf(op) (op & 0x03ff)
/* ACL handle and flags pack/unpack */
#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12))
#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
#define hci_handle(h) (h & 0x0fff)
#define hci_flags(h) (h >> 12)
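/* Worked examples (illustrative only):
 * hci_opcode_pack(0x01, 0x0001) == 0x0401 (OGF 0x01 Link Control,
 * OCF 0x0001 Inquiry); hci_opcode_ogf(0x0401) == 0x01 and
 * hci_opcode_ocf(0x0401) == 0x0001 recover the two halves.
 * Likewise hci_handle_pack(0x002a, 0x2) == 0x202a, and
 * hci_handle(0x202a) == 0x002a, hci_flags(0x202a) == 0x2.
 */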

View File

@ -25,7 +25,6 @@
#ifndef __HCI_CORE_H
#define __HCI_CORE_H
#include <linux/interrupt.h>
#include <net/bluetooth/hci.h>
/* HCI priority */
@ -65,7 +64,7 @@ struct discovery_state {
DISCOVERY_RESOLVING,
DISCOVERY_STOPPING,
} state;
struct list_head all; /* All devices found during inquiry */
struct list_head all; /* All devices found during inquiry */
struct list_head unknown; /* Name state not known */
struct list_head resolve; /* Name needs to be resolved */
__u32 timestamp;
@ -105,7 +104,7 @@ struct link_key {
struct list_head list;
bdaddr_t bdaddr;
u8 type;
u8 val[16];
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
};
@ -333,6 +332,7 @@ struct hci_conn {
void *l2cap_data;
void *sco_data;
void *smp_conn;
struct amp_mgr *amp_mgr;
struct hci_conn *link;
@ -360,7 +360,8 @@ extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
extern int l2cap_disconn_ind(struct hci_conn *hcon);
extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
u16 flags);
extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
@ -429,8 +430,8 @@ enum {
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags));
return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
static inline void hci_conn_hash_init(struct hci_dev *hdev)
@ -640,6 +641,19 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
dev_set_drvdata(&hdev->dev, data);
}
/* hci_dev_list shall be locked */
static inline uint8_t __hci_num_ctrl(void)
{
uint8_t count = 0;
struct list_head *p;
list_for_each(p, &hci_dev_list) {
count++;
}
return count;
}
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
@ -661,7 +675,8 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);

View File

@ -40,11 +40,11 @@
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
#define L2CAP_DEFAULT_ACK_TO 200
#define L2CAP_LE_DEFAULT_MTU 23
#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
#define L2CAP_LE_MIN_MTU 23
#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@ -52,6 +52,8 @@
#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
#define L2CAP_A2MP_DEFAULT_MTU 670
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
@ -229,9 +231,14 @@ struct l2cap_conn_rsp {
__le16 status;
} __packed;
/* protocol/service multiplexer (PSM) */
#define L2CAP_PSM_SDP 0x0001
#define L2CAP_PSM_RFCOMM 0x0003
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
#define L2CAP_CID_A2MP 0x0003
#define L2CAP_CID_LE_DATA 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
@ -271,6 +278,9 @@ struct l2cap_conf_rsp {
#define L2CAP_CONF_PENDING 0x0004
#define L2CAP_CONF_EFS_REJECT 0x0005
/* configuration req/rsp continuation flag */
#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
struct l2cap_conf_opt {
__u8 type;
__u8 len;
@ -419,11 +429,6 @@ struct l2cap_seq_list {
#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
#define L2CAP_SEQ_LIST_TAIL 0x8000
struct srej_list {
__u16 tx_seq;
struct list_head list;
};
struct l2cap_chan {
struct sock *sk;
@ -475,14 +480,12 @@ struct l2cap_chan {
__u16 expected_ack_seq;
__u16 expected_tx_seq;
__u16 buffer_seq;
__u16 buffer_seq_srej;
__u16 srej_save_reqseq;
__u16 last_acked_seq;
__u16 frames_sent;
__u16 unacked_frames;
__u8 retry_count;
__u16 srej_queue_next;
__u8 num_acked;
__u16 sdu_len;
struct sk_buff *sdu;
struct sk_buff *sdu_last_frag;
@ -515,7 +518,6 @@ struct l2cap_chan {
struct sk_buff_head srej_q;
struct l2cap_seq_list srej_list;
struct l2cap_seq_list retrans_list;
struct list_head srej_l;
struct list_head list;
struct list_head global_l;
@ -528,10 +530,14 @@ struct l2cap_chan {
struct l2cap_ops {
char *name;
struct l2cap_chan *(*new_connection) (void *data);
int (*recv) (void *data, struct sk_buff *skb);
void (*close) (void *data);
void (*state_change) (void *data, int state);
struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
int (*recv) (struct l2cap_chan * chan,
struct sk_buff *skb);
void (*teardown) (struct l2cap_chan *chan, int err);
void (*close) (struct l2cap_chan *chan);
void (*state_change) (struct l2cap_chan *chan,
int state);
void (*ready) (struct l2cap_chan *chan);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long len, int nb);
};
@ -575,6 +581,7 @@ struct l2cap_conn {
#define L2CAP_CHAN_RAW 1
#define L2CAP_CHAN_CONN_LESS 2
#define L2CAP_CHAN_CONN_ORIENTED 3
#define L2CAP_CHAN_CONN_FIX_A2MP 4
/* ----- L2CAP socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
@ -597,6 +604,7 @@ enum {
CONF_EWS_RECV,
CONF_LOC_CONF_PEND,
CONF_REM_CONF_PEND,
CONF_NOT_COMPLETE,
};
#define L2CAP_CONF_MAX_CONF_REQ 2
@ -713,11 +721,7 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \
msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO));
#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \
msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
@ -736,173 +740,17 @@ static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
return (seq + 1) % (chan->tx_win_max + 1);
}
static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
{
int sub;
sub = (ch->next_tx_seq - ch->expected_ack_seq) % 64;
if (sub < 0)
sub += 64;
return sub == ch->remote_tx_win;
return NULL;
}
static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
L2CAP_EXT_CTRL_REQSEQ_SHIFT;
else
return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
}
static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
L2CAP_EXT_CTRL_REQSEQ;
else
return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
}
static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
L2CAP_EXT_CTRL_TXSEQ_SHIFT;
else
return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
}
static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
L2CAP_EXT_CTRL_TXSEQ;
else
return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
}
static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
else
return ctrl & L2CAP_CTRL_FRAME_TYPE;
}
static inline __u32 __set_sframe(struct l2cap_chan *chan)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return L2CAP_EXT_CTRL_FRAME_TYPE;
else
return L2CAP_CTRL_FRAME_TYPE;
}
static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
else
return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
}
static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
else
return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
}
static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
{
return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
}
static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return L2CAP_EXT_CTRL_SAR;
else
return L2CAP_CTRL_SAR;
}
static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
L2CAP_EXT_CTRL_SUPER_SHIFT;
else
return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
}
static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
L2CAP_EXT_CTRL_SUPERVISE;
else
return (super << L2CAP_CTRL_SUPER_SHIFT) &
L2CAP_CTRL_SUPERVISE;
}
static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return L2CAP_EXT_CTRL_FINAL;
else
return L2CAP_CTRL_FINAL;
}
static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return ctrl & L2CAP_EXT_CTRL_FINAL;
else
return ctrl & L2CAP_CTRL_FINAL;
}
static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return L2CAP_EXT_CTRL_POLL;
else
return L2CAP_CTRL_POLL;
}
static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return ctrl & L2CAP_EXT_CTRL_POLL;
else
return ctrl & L2CAP_CTRL_POLL;
}
static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return get_unaligned_le32(p);
else
return get_unaligned_le16(p);
}
static inline void __put_control(struct l2cap_chan *chan, __u32 control,
void *p)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return put_unaligned_le32(control, p);
else
return put_unaligned_le16(control, p);
}
static inline __u8 __ctrl_size(struct l2cap_chan *chan)
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
else
return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
}
extern bool disable_ertm;
@ -926,5 +774,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
int l2cap_chan_check_security(struct l2cap_chan *chan);
void l2cap_chan_set_defaults(struct l2cap_chan *chan);
int l2cap_ertm_init(struct l2cap_chan *chan);
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
#endif /* __L2CAP_H */

View File

@ -1945,6 +1945,11 @@ enum ieee80211_rate_control_changed {
* to also unregister the device. If it returns 1, then mac80211
* will also go through the regular complete restart on resume.
*
* @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
* modified. The reason is that device_set_wakeup_enable() is
* supposed to be called when the configuration changes, not only
* in suspend().
*
* @add_interface: Called when a netdevice attached to the hardware is
* enabled. Because it is not called for monitor mode devices, @start
* and @stop must be implemented.
@ -2974,6 +2979,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
* ieee80211_generic_frame_duration - Calculate the duration field for a frame
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @band: the band to calculate the frame duration on
* @frame_len: the length of the frame.
* @rate: the rate at which the frame is going to be transmitted.
*

View File

@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
a2mp.o

568
net/bluetooth/a2mp.c Normal file
View File

@ -0,0 +1,568 @@
/*
Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
Copyright (c) 2011,2012 Intel Corp.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 and
only version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/a2mp.h>
/* A2MP build & send command helper functions */
static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
{
struct a2mp_cmd *cmd;
int plen;
plen = sizeof(*cmd) + len;
cmd = kzalloc(plen, GFP_KERNEL);
if (!cmd)
return NULL;
cmd->code = code;
cmd->ident = ident;
cmd->len = cpu_to_le16(len);
memcpy(cmd->data, data, len);
return cmd;
}
static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
void *data)
{
struct l2cap_chan *chan = mgr->a2mp_chan;
struct a2mp_cmd *cmd;
u16 total_len = len + sizeof(*cmd);
struct kvec iv;
struct msghdr msg;
cmd = __a2mp_build(code, ident, len, data);
if (!cmd)
return;
iv.iov_base = cmd;
iv.iov_len = total_len;
memset(&msg, 0, sizeof(msg));
msg.msg_iov = (struct iovec *) &iv;
msg.msg_iovlen = 1;
l2cap_chan_send(chan, &msg, total_len, 0);
kfree(cmd);
}
static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
{
cl->id = 0;
cl->type = 0;
cl->status = 1;
}
/* hci_dev_list shall be locked */
static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
{
int i = 0;
struct hci_dev *hdev;
__a2mp_cl_bredr(cl);
list_for_each_entry(hdev, &hci_dev_list, list) {
/* Iterate through AMP controllers */
if (hdev->id == HCI_BREDR_ID)
continue;
/* Starting from second entry */
if (++i >= num_ctrl)
return;
cl[i].id = hdev->id;
cl[i].type = hdev->amp_type;
cl[i].status = hdev->amp_status;
}
}
/* Processing A2MP messages */
static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_cmd_rej *rej = (void *) skb->data;
if (le16_to_cpu(hdr->len) < sizeof(*rej))
return -EINVAL;
BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
skb_pull(skb, sizeof(*rej));
return 0;
}
static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_discov_req *req = (void *) skb->data;
u16 len = le16_to_cpu(hdr->len);
struct a2mp_discov_rsp *rsp;
u16 ext_feat;
u8 num_ctrl;
if (len < sizeof(*req))
return -EINVAL;
skb_pull(skb, sizeof(*req));
ext_feat = le16_to_cpu(req->ext_feat);
BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
/* check that packet is not broken for now */
while (ext_feat & A2MP_FEAT_EXT) {
if (len < sizeof(ext_feat))
return -EINVAL;
ext_feat = get_unaligned_le16(skb->data);
BT_DBG("efm 0x%4.4x", ext_feat);
len -= sizeof(ext_feat);
skb_pull(skb, sizeof(ext_feat));
}
read_lock(&hci_dev_list_lock);
num_ctrl = __hci_num_ctrl();
len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
rsp = kmalloc(len, GFP_ATOMIC);
if (!rsp) {
read_unlock(&hci_dev_list_lock);
return -ENOMEM;
}
rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
rsp->ext_feat = 0;
__a2mp_add_cl(mgr, rsp->cl, num_ctrl);
read_unlock(&hci_dev_list_lock);
a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
kfree(rsp);
return 0;
}
static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_cl *cl = (void *) skb->data;
while (skb->len >= sizeof(*cl)) {
BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
cl->status);
cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
}
/* TODO send A2MP_CHANGE_RSP */
return 0;
}
static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_info_req *req = (void *) skb->data;
struct a2mp_info_rsp rsp;
struct hci_dev *hdev;
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
BT_DBG("id %d", req->id);
rsp.id = req->id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
hdev = hci_dev_get(req->id);
if (hdev && hdev->amp_type != HCI_BREDR) {
rsp.status = 0;
rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
}
if (hdev)
hci_dev_put(hdev);
a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
skb_pull(skb, sizeof(*req));
return 0;
}
static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_amp_assoc_req *req = (void *) skb->data;
struct hci_dev *hdev;
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
BT_DBG("id %d", req->id);
hdev = hci_dev_get(req->id);
if (!hdev || hdev->amp_type == HCI_BREDR) {
struct a2mp_amp_assoc_rsp rsp;
rsp.id = req->id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
&rsp);
goto clean;
}
/* Placeholder for HCI Read AMP Assoc */
clean:
if (hdev)
hci_dev_put(hdev);
skb_pull(skb, sizeof(*req));
return 0;
}
static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_physlink_req *req = (void *) skb->data;
struct a2mp_physlink_rsp rsp;
struct hci_dev *hdev;
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
rsp.local_id = req->remote_id;
rsp.remote_id = req->local_id;
hdev = hci_dev_get(req->remote_id);
if (!hdev || hdev->amp_type != HCI_AMP) {
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
goto send_rsp;
}
/* TODO process physlink create */
rsp.status = A2MP_STATUS_SUCCESS;
send_rsp:
if (hdev)
hci_dev_put(hdev);
a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
&rsp);
skb_pull(skb, le16_to_cpu(hdr->len));
return 0;
}
static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_physlink_req *req = (void *) skb->data;
struct a2mp_physlink_rsp rsp;
struct hci_dev *hdev;
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
rsp.local_id = req->remote_id;
rsp.remote_id = req->local_id;
rsp.status = A2MP_STATUS_SUCCESS;
hdev = hci_dev_get(req->local_id);
if (!hdev) {
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
goto send_rsp;
}
/* TODO Disconnect Phys Link here */
hci_dev_put(hdev);
send_rsp:
a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
skb_pull(skb, sizeof(*req));
return 0;
}
static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
BT_DBG("ident %d code %d", hdr->ident, hdr->code);
skb_pull(skb, le16_to_cpu(hdr->len));
return 0;
}
/* Handle A2MP signalling */
static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct a2mp_cmd *hdr = (void *) skb->data;
struct amp_mgr *mgr = chan->data;
int err = 0;
amp_mgr_get(mgr);
while (skb->len >= sizeof(*hdr)) {
struct a2mp_cmd *hdr = (void *) skb->data;
u16 len = le16_to_cpu(hdr->len);
BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
skb_pull(skb, sizeof(*hdr));
if (len > skb->len || !hdr->ident) {
err = -EINVAL;
break;
}
mgr->ident = hdr->ident;
switch (hdr->code) {
case A2MP_COMMAND_REJ:
a2mp_command_rej(mgr, skb, hdr);
break;
case A2MP_DISCOVER_REQ:
err = a2mp_discover_req(mgr, skb, hdr);
break;
case A2MP_CHANGE_NOTIFY:
err = a2mp_change_notify(mgr, skb, hdr);
break;
case A2MP_GETINFO_REQ:
err = a2mp_getinfo_req(mgr, skb, hdr);
break;
case A2MP_GETAMPASSOC_REQ:
err = a2mp_getampassoc_req(mgr, skb, hdr);
break;
case A2MP_CREATEPHYSLINK_REQ:
err = a2mp_createphyslink_req(mgr, skb, hdr);
break;
case A2MP_DISCONNPHYSLINK_REQ:
err = a2mp_discphyslink_req(mgr, skb, hdr);
break;
case A2MP_CHANGE_RSP:
case A2MP_DISCOVER_RSP:
case A2MP_GETINFO_RSP:
case A2MP_GETAMPASSOC_RSP:
case A2MP_CREATEPHYSLINK_RSP:
case A2MP_DISCONNPHYSLINK_RSP:
err = a2mp_cmd_rsp(mgr, skb, hdr);
break;
default:
BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
err = -EINVAL;
break;
}
}
if (err) {
struct a2mp_cmd_rej rej;
rej.reason = __constant_cpu_to_le16(0);
BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
&rej);
}
/* Always free skb and return a success error code to prevent
sending an L2CAP Disconnect over the A2MP channel */
kfree_skb(skb);
amp_mgr_put(mgr);
return 0;
}
static void a2mp_chan_close_cb(struct l2cap_chan *chan)
{
l2cap_chan_destroy(chan);
}
static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
{
struct amp_mgr *mgr = chan->data;
if (!mgr)
return;
BT_DBG("chan %p state %s", chan, state_to_string(state));
chan->state = state;
switch (state) {
case BT_CLOSED:
if (mgr)
amp_mgr_put(mgr);
break;
}
}
static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
unsigned long len, int nb)
{
return bt_skb_alloc(len, GFP_KERNEL);
}
static struct l2cap_ops a2mp_chan_ops = {
.name = "L2CAP A2MP channel",
.recv = a2mp_chan_recv_cb,
.close = a2mp_chan_close_cb,
.state_change = a2mp_chan_state_change_cb,
.alloc_skb = a2mp_chan_alloc_skb_cb,
/* Not implemented for A2MP */
.new_connection = l2cap_chan_no_new_connection,
.teardown = l2cap_chan_no_teardown,
.ready = l2cap_chan_no_ready,
};
static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
{
struct l2cap_chan *chan;
int err;
chan = l2cap_chan_create();
if (!chan)
return NULL;
BT_DBG("chan %p", chan);
chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
chan->ops = &a2mp_chan_ops;
l2cap_chan_set_defaults(chan);
chan->remote_max_tx = chan->max_tx;
chan->remote_tx_win = chan->tx_win;
chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
skb_queue_head_init(&chan->tx_q);
chan->mode = L2CAP_MODE_ERTM;
err = l2cap_ertm_init(chan);
if (err < 0) {
l2cap_chan_del(chan, 0);
return NULL;
}
chan->conf_state = 0;
l2cap_chan_add(conn, chan);
chan->remote_mps = chan->omtu;
chan->mps = chan->omtu;
chan->state = BT_CONNECTED;
return chan;
}
/* AMP Manager functions */
void amp_mgr_get(struct amp_mgr *mgr)
{
BT_DBG("mgr %p", mgr);
kref_get(&mgr->kref);
}
static void amp_mgr_destroy(struct kref *kref)
{
struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
BT_DBG("mgr %p", mgr);
kfree(mgr);
}
int amp_mgr_put(struct amp_mgr *mgr)
{
BT_DBG("mgr %p", mgr);
return kref_put(&mgr->kref, &amp_mgr_destroy);
}
static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
{
struct amp_mgr *mgr;
struct l2cap_chan *chan;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return NULL;
BT_DBG("conn %p mgr %p", conn, mgr);
mgr->l2cap_conn = conn;
chan = a2mp_chan_open(conn);
if (!chan) {
kfree(mgr);
return NULL;
}
mgr->a2mp_chan = chan;
chan->data = mgr;
conn->hcon->amp_mgr = mgr;
kref_init(&mgr->kref);
return mgr;
}
struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
struct sk_buff *skb)
{
struct amp_mgr *mgr;
mgr = amp_mgr_create(conn);
if (!mgr) {
BT_ERR("Could not create AMP manager");
return NULL;
}
BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
return mgr->a2mp_chan;
}
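
The sketch below is an editor's illustration, not part of this patch: it only shows how a caller (for example the L2CAP core, on receiving a frame for the A2MP fixed channel) might hand data to the new a2mp_channel_create() API. The CID value, the helper name and the routing logic are assumptions made for this example; only a2mp_channel_create(), hcon->amp_mgr and the channel ops come from the code above.

/* Illustrative sketch only -- assumed caller, not part of this patch. */
#define EXAMPLE_A2MP_CID 0x0003	/* assumed fixed-channel CID for A2MP */

static void example_a2mp_rx(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Reuse the channel owned by an existing AMP manager, if any */
	if (hcon->amp_mgr)
		chan = hcon->amp_mgr->a2mp_chan;
	else
		chan = a2mp_channel_create(conn, skb);

	if (!chan) {
		kfree_skb(skb);
		return;
	}

	/* Deliver the frame; for A2MP this ends up in a2mp_chan_recv_cb() */
	chan->ops->recv(chan, skb);
}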

View File

@ -25,18 +25,7 @@
/* Bluetooth address family and sockets. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ioctls.h>
#include <linux/kmod.h>
#include <net/bluetooth/bluetooth.h>
@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
return 0;
}
unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;

View File

@ -26,26 +26,9 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/freezer.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
};
static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct net_device *dev = s->dev;
struct sk_buff *nskb;
@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
BNEP_COMPRESSED
};
static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;

View File

@ -25,16 +25,8 @@
SOFTWARE IS DISCLAIMED.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
}
#ifdef CONFIG_BT_BNEP_MC_FILTER
static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
struct ethhdr *eh = (void *) skb->data;
@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Determine ether protocol. Based on eth_type_trans. */
static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
static u16 bnep_net_eth_proto(struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
u16 proto = ntohs(eh->h_proto);
@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
return ETH_P_802_2;
}
static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
{
u16 proto = bnep_net_eth_proto(skb);
struct bnep_proto_filter *f = s->proto_filter;

View File

@ -24,24 +24,8 @@
SOFTWARE IS DISCLAIMED.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include "bnep.h"

View File

@ -24,24 +24,11 @@
/* Bluetooth HCI connection handling. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
static void hci_le_connect(struct hci_conn *conn)
{
@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
conn->sec_level = BT_SECURITY_LOW;
memset(&cp, 0, sizeof(cp));
cp.scan_interval = cpu_to_le16(0x0060);
cp.scan_window = cpu_to_le16(0x0030);
cp.scan_interval = __constant_cpu_to_le16(0x0060);
cp.scan_window = __constant_cpu_to_le16(0x0030);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
cp.conn_interval_min = cpu_to_le16(0x0028);
cp.conn_interval_max = cpu_to_le16(0x0038);
cp.supervision_timeout = cpu_to_le16(0x002a);
cp.min_ce_len = cpu_to_le16(0x0000);
cp.max_ce_len = cpu_to_le16(0x0000);
cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
cp.min_ce_len = __constant_cpu_to_le16(0x0000);
cp.max_ce_len = __constant_cpu_to_le16(0x0000);
hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
cp.pscan_rep_mode = ie->data.pscan_rep_mode;
cp.pscan_mode = ie->data.pscan_mode;
cp.clock_offset = ie->data.clock_offset |
cpu_to_le16(0x8000);
__constant_cpu_to_le16(0x8000);
}
memcpy(conn->dev_class, ie->data.dev_class, 3);
@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
cp.handle = cpu_to_le16(handle);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
cp.max_latency = cpu_to_le16(0xffff);
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.voice_setting = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
}
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
u16 latency, u16 to_multiplier)
u16 latency, u16 to_multiplier)
{
struct hci_cp_le_conn_update cp;
struct hci_dev *hdev = conn->hdev;
@ -197,15 +184,14 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
cp.conn_interval_max = cpu_to_le16(max);
cp.conn_latency = cpu_to_le16(latency);
cp.supervision_timeout = cpu_to_le16(to_multiplier);
cp.min_ce_len = cpu_to_le16(0x0001);
cp.max_ce_len = cpu_to_le16(0x0001);
cp.min_ce_len = __constant_cpu_to_le16(0x0001);
cp.max_ce_len = __constant_cpu_to_le16(0x0001);
hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
__u8 ltk[16])
__u8 ltk[16])
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_start_enc cp;
@ -221,7 +207,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_start_enc);
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
@ -247,7 +232,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
static void hci_conn_timeout(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
disc_work.work);
disc_work.work);
__u8 reason;
BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
struct hci_cp_sniff_subrate cp;
cp.handle = cpu_to_le16(conn->handle);
cp.max_latency = cpu_to_le16(0);
cp.min_remote_timeout = cpu_to_le16(0);
cp.min_local_timeout = cpu_to_le16(0);
cp.max_latency = __constant_cpu_to_le16(0);
cp.min_remote_timeout = __constant_cpu_to_le16(0);
cp.min_local_timeout = __constant_cpu_to_le16(0);
hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
}
@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
cp.handle = cpu_to_le16(conn->handle);
cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
cp.attempt = cpu_to_le16(4);
cp.timeout = cpu_to_le16(1);
cp.attempt = __constant_cpu_to_le16(4);
cp.timeout = __constant_cpu_to_le16(1);
hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
}
}
@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
struct hci_dev *hdev = conn->hdev;
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
&conn->dst);
&conn->dst);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
(unsigned long) conn);
(unsigned long) conn);
atomic_set(&conn->refcnt, 0);
@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
}
}
hci_chan_list_flush(conn);
if (conn->amp_mgr)
amp_mgr_put(conn->amp_mgr);
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@ -454,7 +441,8 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
read_lock(&hci_dev_list_lock);
list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
if (!test_bit(HCI_UP, &d->flags) ||
test_bit(HCI_RAW, &d->flags))
continue;
/* Simple routing:
@ -495,6 +483,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
if (type == LE_LINK) {
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
if (!le) {
le = hci_conn_hash_lookup_state(hdev, LE_LINK,
BT_CONNECT);
if (le)
return ERR_PTR(-EBUSY);
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return ERR_PTR(-ENOMEM);
@ -545,7 +538,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
hci_conn_hold(sco);
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
@ -560,7 +553,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
return sco;
}
EXPORT_SYMBOL(hci_connect);
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
@ -572,7 +564,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
@ -600,7 +591,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
sizeof(cp), &cp);
if (conn->key_type != 0xff)
set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
}
@ -618,7 +609,7 @@ static void hci_conn_encrypt(struct hci_conn *conn)
cp.handle = cpu_to_le16(conn->handle);
cp.encrypt = 0x01;
hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
&cp);
&cp);
}
}
@ -648,8 +639,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
/* An unauthenticated combination key has sufficient security for
security level 1 and 2. */
if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
(sec_level == BT_SECURITY_MEDIUM ||
sec_level == BT_SECURITY_LOW))
(sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
goto encrypt;
/* A combination key has always sufficient security for the security
@ -657,8 +647,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
is generated using maximum PIN code length (16).
For pre 2.1 units. */
if (conn->key_type == HCI_LK_COMBINATION &&
(sec_level != BT_SECURITY_HIGH ||
conn->pin_length == 16))
(sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
goto encrypt;
auth:
@ -701,12 +690,11 @@ int hci_conn_change_link_key(struct hci_conn *conn)
struct hci_cp_change_conn_link_key cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
sizeof(cp), &cp);
sizeof(cp), &cp);
}
return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
@ -752,7 +740,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
timer:
if (hdev->idle_timeout > 0)
mod_timer(&conn->idle_timer,
jiffies + msecs_to_jiffies(hdev->idle_timeout));
jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
/* Drop all connection on the device */
@ -802,7 +790,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
register struct hci_conn *c;
struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;

View File

@ -25,28 +25,10 @@
/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -65,6 +47,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */
static void hci_notify(struct hci_dev *hdev, int event)
@ -124,8 +109,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
unsigned long opt, __u32 timeout)
static int __hci_request(struct hci_dev *hdev,
void (*req)(struct hci_dev *hdev, unsigned long opt),
unsigned long opt, __u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
@ -166,8 +152,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
unsigned long opt, __u32 timeout)
static int hci_request(struct hci_dev *hdev,
void (*req)(struct hci_dev *hdev, unsigned long opt),
unsigned long opt, __u32 timeout)
{
int ret;
@ -202,7 +189,7 @@ static void bredr_init(struct hci_dev *hdev)
/* Mandatory initialization */
/* Reset */
if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_RESET, &hdev->flags);
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
@ -235,7 +222,7 @@ static void bredr_init(struct hci_dev *hdev)
hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
param = __constant_cpu_to_le16(0x7d00);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
bacpy(&cp.bdaddr, BDADDR_ANY);
@ -417,7 +404,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
bdaddr_t *bdaddr)
{
struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
@ -478,7 +466,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
list_for_each_entry(p, &cache->resolve, list) {
if (p->name_state != NAME_PENDING &&
abs(p->data.rssi) >= abs(ie->data.rssi))
abs(p->data.rssi) >= abs(ie->data.rssi))
break;
pos = &p->list;
}
@ -503,7 +491,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
*ssp = true;
if (ie->name_state == NAME_NEEDED &&
data->rssi != ie->data.rssi) {
data->rssi != ie->data.rssi) {
ie->data.rssi = data->rssi;
hci_inquiry_cache_update_resolve(hdev, ie);
}
@ -527,7 +515,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
update:
if (name_known && ie->name_state != NAME_KNOWN &&
ie->name_state != NAME_PENDING) {
ie->name_state != NAME_PENDING) {
ie->name_state = NAME_KNOWN;
list_del(&ie->list);
}
@ -605,8 +593,7 @@ int hci_inquiry(void __user *arg)
hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) ||
ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
@ -620,7 +607,9 @@ int hci_inquiry(void __user *arg)
goto done;
}
/* for unlimited number of responses we will use buffer with 255 entries */
/* for unlimited number of responses we will use buffer with
* 255 entries
*/
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
@ -641,7 +630,7 @@ int hci_inquiry(void __user *arg)
if (!copy_to_user(ptr, &ir, sizeof(ir))) {
ptr += sizeof(ir);
if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
ir.num_rsp))
ir.num_rsp))
err = -EFAULT;
} else
err = -EFAULT;
@ -702,11 +691,11 @@ int hci_dev_open(__u16 dev)
hdev->init_last_cmd = 0;
ret = __hci_request(hdev, hci_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
if (lmp_host_le_capable(hdev))
ret = __hci_request(hdev, hci_le_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
clear_bit(HCI_INIT, &hdev->flags);
}
@ -791,10 +780,10 @@ static int hci_dev_do_close(struct hci_dev *hdev)
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
if (!test_bit(HCI_RAW, &hdev->flags) &&
test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0,
msecs_to_jiffies(250));
msecs_to_jiffies(250));
clear_bit(HCI_INIT, &hdev->flags);
}
@ -884,7 +873,7 @@ int hci_dev_reset(__u16 dev)
if (!test_bit(HCI_RAW, &hdev->flags))
ret = __hci_request(hdev, hci_reset_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
done:
hci_req_unlock(hdev);
@ -924,7 +913,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
switch (cmd) {
case HCISETAUTH:
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETENCRYPT:
@ -936,23 +925,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
if (err)
break;
}
err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETSCAN:
err = hci_request(hdev, hci_scan_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETLINKPOL:
err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETLINKMODE:
@ -1103,7 +1092,7 @@ static void hci_power_on(struct work_struct *work)
if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
schedule_delayed_work(&hdev->power_off,
msecs_to_jiffies(AUTO_OFF_TIMEOUT));
msecs_to_jiffies(AUTO_OFF_TIMEOUT));
if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
mgmt_index_added(hdev);
@ -1112,7 +1101,7 @@ static void hci_power_on(struct work_struct *work)
static void hci_power_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
power_off.work);
power_off.work);
BT_DBG("%s", hdev->name);
@ -1193,7 +1182,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
u8 key_type, u8 old_key_type)
u8 key_type, u8 old_key_type)
{
/* Legacy key */
if (key_type < 0x03)
@ -1234,7 +1223,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
list_for_each_entry(k, &hdev->long_term_keys, list) {
if (k->ediv != ediv ||
memcmp(rand, k->rand, sizeof(k->rand)))
memcmp(rand, k->rand, sizeof(k->rand)))
continue;
return k;
@ -1242,7 +1231,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type)
@ -1251,12 +1239,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
list_for_each_entry(k, &hdev->long_term_keys, list)
if (addr_type == k->bdaddr_type &&
bacmp(bdaddr, &k->bdaddr) == 0)
bacmp(bdaddr, &k->bdaddr) == 0)
return k;
return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@ -1283,15 +1270,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
* combination key for legacy pairing even when there's no
* previous key */
if (type == HCI_LK_CHANGED_COMBINATION &&
(!conn || conn->remote_auth == 0xff) &&
old_key_type == 0xff) {
(!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
type = HCI_LK_COMBINATION;
if (conn)
conn->key_type = type;
}
bacpy(&key->bdaddr, bdaddr);
memcpy(key->val, val, 16);
memcpy(key->val, val, HCI_LINK_KEY_SIZE);
key->pin_len = pin_len;
if (type == HCI_LK_CHANGED_COMBINATION)
@ -1540,6 +1526,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
memset(&cp, 0, sizeof(cp));
cp.enable = 1;
cp.filter_dup = 1;
hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
@ -1707,41 +1694,39 @@ EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head, *p;
int id, error;
if (!hdev->open || !hdev->close)
return -EINVAL;
write_lock(&hci_dev_list_lock);
/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID.
*/
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
head = &hci_dev_list;
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
int nid = list_entry(p, struct hci_dev, list)->id;
if (nid > id)
break;
if (nid == id)
id++;
head = p;
switch (hdev->dev_type) {
case HCI_BREDR:
id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
break;
case HCI_AMP:
id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
break;
default:
return -EINVAL;
}
if (id < 0)
return id;
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
list_add(&hdev->list, head);
write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list);
write_unlock(&hci_dev_list_lock);
hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1);
WQ_MEM_RECLAIM, 1);
if (!hdev->workqueue) {
error = -ENOMEM;
goto err;
@ -1752,7 +1737,8 @@ int hci_register_dev(struct hci_dev *hdev)
goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
hdev);
if (hdev->rfkill) {
if (rfkill_register(hdev->rfkill) < 0) {
rfkill_destroy(hdev->rfkill);
@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
err_wqueue:
destroy_workqueue(hdev->workqueue);
err:
ida_simple_remove(&hci_index_ida, hdev->id);
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
int i;
int i, id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
set_bit(HCI_UNREGISTER, &hdev->dev_flags);
id = hdev->id;
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
!test_bit(HCI_SETUP, &hdev->dev_flags)) {
!test_bit(HCI_SETUP, &hdev->dev_flags)) {
hci_dev_lock(hdev);
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_dev_unlock(hdev);
hci_dev_put(hdev);
ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
&& !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
int count, __u8 index)
int count, __u8 index)
{
int len = 0;
int hlen = 0;
@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
struct bt_skb_cb *scb;
if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
index >= NUM_REASSEMBLY)
index >= NUM_REASSEMBLY)
return -EILSEQ;
skb = hdev->reassembly[index];
@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
type = bt_cb(skb)->pkt_type;
rem = hci_reassembly(hdev, type, data, count,
STREAM_REASSEMBLY);
STREAM_REASSEMBLY);
if (rem < 0)
return rem;
@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
struct sk_buff *skb, __u16 flags)
struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_queue_tail(&conn->data_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */
/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
return conn;
}
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
}
}
rcu_read_unlock();
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
int *quote)
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
skb->priority = HCI_PRIO_MAX - 1;
BT_DBG("chan %p skb %p promoted to %d", chan, skb,
skb->priority);
skb->priority);
}
if (hci_conn_num(hdev, type) == num)
@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!cnt && time_after(jiffies, hdev->acl_last_tx +
msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
hci_link_tx_to(hdev, ACL_LINK);
}
}
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
unsigned int cnt = hdev->acl_cnt;
struct hci_chan *chan;
@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
__check_timeout(hdev, cnt);
while (hdev->acl_cnt &&
(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb->len, skb->priority);
skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
unsigned int cnt = hdev->block_cnt;
struct hci_chan *chan;
@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
__check_timeout(hdev, cnt);
while (hdev->block_cnt > 0 &&
(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
int blocks;
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb->len, skb->priority);
skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
return;
hci_conn_enter_active_mode(chan->conn,
bt_cb(skb)->force_active);
bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
static inline void hci_sched_acl(struct hci_dev *hdev)
static void hci_sched_acl(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);
@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
static void hci_sched_sco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
static void hci_sched_esco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
if (!hci_conn_num(hdev, ESCO_LINK))
return;
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
&quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
}
}
static inline void hci_sched_le(struct hci_dev *hdev)
static void hci_sched_le(struct hci_dev *hdev)
{
struct hci_chan *chan;
struct sk_buff *skb;
@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->le_cnt && hdev->le_pkts &&
time_after(jiffies, hdev->le_last_tx + HZ * 45))
time_after(jiffies, hdev->le_last_tx + HZ * 45))
hci_link_tx_to(hdev, LE_LINK);
}
@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb->len, skb->priority);
skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
struct sk_buff *skb;
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
hdev->sco_cnt, hdev->le_cnt);
/* Schedule queues and send stuff to HCI driver */
@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
flags = hci_flags(handle);
handle = hci_handle(handle);
BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
handle, flags);
hdev->stat.acl_rx++;
@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
return;
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
hdev->name, handle);
hdev->name, handle);
}
kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
return;
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
hdev->name, handle);
hdev->name, handle);
}
kfree_skb(skb);
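
Editor's illustration, not part of this patch: the hci_register_dev() hunk earlier in this file replaces the hand-rolled search over hci_dev_list with an IDA allocator, keeping index 0 off-limits to AMP controllers so that index can serve as the AMP controller ID. The sketch below shows that allocation pattern in isolation; the example_* names are invented for illustration and the usual kernel headers are assumed.

#include <linux/idr.h>

static DEFINE_IDA(example_index_ida);

static int example_get_index(bool is_amp)
{
	/* AMP controllers start at 1 so index 0 stays reserved for BR/EDR;
	 * an upper bound of 0 means "no limit" for ida_simple_get() */
	return ida_simple_get(&example_index_ida, is_amp ? 1 : 0, 0,
			      GFP_KERNEL);
}

static void example_put_index(int id)
{
	ida_simple_remove(&example_index_ida, id);
}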

File diff suppressed because it is too large

View File

@ -24,25 +24,7 @@
/* Bluetooth HCI sockets. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
flt = &hci_pi(sk)->filter;
if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
&flt->type_mask))
continue;
if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
if (!hci_test_bit(evt, &flt->event_mask))
continue;
@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_mon_hdr *hdr;
/* Create a private copy with headroom */
skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
GFP_ATOMIC);
if (!skb_copy)
continue;
@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
}
/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
unsigned long arg)
{
struct hci_dev *hdev = hci_pi(sk)->hdev;
@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
}
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
}
}
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
@ -690,7 +677,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
return err;
}
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
int *addr_len, int peer)
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
return 0;
}
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb)
{
__u32 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR) {
int incoming = bt_cb(skb)->incoming;
put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
&incoming);
}
if (mask & HCI_CMSG_TSTAMP) {
@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
}
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
u16 ocf = hci_opcode_ocf(opcode);
if (((ogf > HCI_SFLT_MAX_OGF) ||
!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
!capable(CAP_NET_RAW)) {
!hci_test_bit(ocf & HCI_FLT_OCF_BITS,
&hci_sec_filter.ocf_mask[ogf])) &&
!capable(CAP_NET_RAW)) {
err = -EPERM;
goto drop;
}
@ -891,7 +882,8 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto done;
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int len)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@ -973,7 +965,8 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;

View File

@ -1,10 +1,6 @@
/* Bluetooth HCI driver model support. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
}
}
static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_link_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_link_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", batostr(&conn->dst));
}
static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_link_features(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
conn->features[0], conn->features[1],
conn->features[2], conn->features[3],
conn->features[4], conn->features[5],
conn->features[6], conn->features[7]);
conn->features[0], conn->features[1],
conn->features[2], conn->features[3],
conn->features[4], conn->features[5],
conn->features[6], conn->features[7]);
}
#define LINK_ATTR(_name, _mode, _show, _store) \
@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
}
}
static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_bus(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
char name[HCI_MAX_NAME_LENGTH + 1];
@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
return sprintf(buf, "%s\n", name);
}
static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_class(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%.2x%.2x%.2x\n",
hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
hdev->dev_class[1], hdev->dev_class[0]);
}
static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
}
static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_features(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
hdev->features[0], hdev->features[1],
hdev->features[2], hdev->features[3],
hdev->features[4], hdev->features[5],
hdev->features[6], hdev->features[7]);
hdev->features[0], hdev->features[1],
hdev->features[2], hdev->features[3],
hdev->features[4], hdev->features[5],
hdev->features[6], hdev->features[7]);
}
static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_manufacturer(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->manufacturer);
}
static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_hci_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_ver);
}
static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_hci_revision(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_rev);
}
static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_idle_timeout(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->idle_timeout);
}
static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
static ssize_t store_idle_timeout(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
unsigned int val;
@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
return count;
}
static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_sniff_max_interval(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_max_interval);
}
static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
static ssize_t store_sniff_max_interval(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
return count;
}
static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t show_sniff_min_interval(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_min_interval);
}
static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
static ssize_t store_sniff_min_interval(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
show_idle_timeout, store_idle_timeout);
show_idle_timeout, store_idle_timeout);
static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
show_sniff_max_interval, store_sniff_max_interval);
show_sniff_max_interval, store_sniff_max_interval);
static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
show_sniff_min_interval, store_sniff_min_interval);
show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
&dev_attr_bus.attr,
@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
memcpy(&data5, &uuid[14], 2);
seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
ntohl(data0), ntohs(data1), ntohs(data2),
ntohs(data3), ntohl(data4), ntohs(data5));
ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
ntohl(data4), ntohs(data5));
}
static int uuids_show(struct seq_file *f, void *p)
@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
auto_accept_delay_set, "%llu\n");
auto_accept_delay_set, "%llu\n");
void hci_init_sysfs(struct hci_dev *hdev)
{
@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
return 0;
debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
hdev, &inquiry_cache_fops);
hdev, &inquiry_cache_fops);
debugfs_create_file("blacklist", 0444, hdev->debugfs,
hdev, &blacklist_fops);
hdev, &blacklist_fops);
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
&auto_accept_delay_fops);
&auto_accept_delay_fops);
return 0;
}

View File

@ -21,27 +21,8 @@
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/freezer.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <net/bluetooth/bluetooth.h>
@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
}
static int __hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data, int size)
unsigned char hdr, unsigned char *data,
int size)
{
struct sk_buff *skb;
@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
return 0;
}
static inline int hidp_send_ctrl_message(struct hidp_session *session,
static int hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data, int size)
{
int err;
@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
mod_timer(&session->timer, jiffies + HZ * session->idle_to);
}
static inline void hidp_del_timer(struct hidp_session *session)
static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
del_timer(&session->timer);

View File

@ -20,22 +20,8 @@
SOFTWARE IS DISCLAIMED.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include "hidp.h"

File diff suppressed because it is too large

View File

@ -27,7 +27,6 @@
/* Bluetooth L2CAP sockets. */
#include <linux/security.h>
#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (err < 0)
goto done;
if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
__le16_to_cpu(la.l2_psm) == 0x0003)
if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
__le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
chan->sec_level = BT_SECURITY_SDP;
bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
return err;
}
static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
{
switch (chan->scid) {
case L2CAP_CID_LE_DATA:
if (mtu < L2CAP_LE_MIN_MTU)
return false;
break;
default:
if (mtu < L2CAP_DEFAULT_MIN_MTU)
return false;
}
return true;
}
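For context, the new l2cap_valid_mtu() helper only enforces a per-channel-type MTU floor before the socket option is accepted further down in l2cap_sock_setsockopt_old(). A minimal standalone sketch of the same check follows; the CID and minimum-MTU constants are assumed specification values, not taken from this commit.

/* Standalone sketch of the MTU floor check added above. The constant
 * values are assumptions following the L2CAP specification. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define L2CAP_CID_LE_DATA      0x0004  /* assumed LE data channel CID */
#define L2CAP_LE_MIN_MTU       23      /* assumed minimum MTU on LE */
#define L2CAP_DEFAULT_MIN_MTU  48      /* assumed minimum MTU on BR/EDR */

struct chan { uint16_t scid; };

static bool valid_mtu(const struct chan *chan, uint16_t mtu)
{
	/* LE data channels have a lower floor than BR/EDR channels. */
	if (chan->scid == L2CAP_CID_LE_DATA)
		return mtu >= L2CAP_LE_MIN_MTU;
	return mtu >= L2CAP_DEFAULT_MIN_MTU;
}

int main(void)
{
	struct chan le = { .scid = L2CAP_CID_LE_DATA };
	struct chan br = { .scid = 0x0040 };

	printf("LE, mtu 23:     %d\n", valid_mtu(&le, 23)); /* 1: accepted */
	printf("BR/EDR, mtu 23: %d\n", valid_mtu(&br, 23)); /* 0: rejected */
	return 0;
}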
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
if (!l2cap_valid_mtu(chan, opts.imtu)) {
err = -EINVAL;
break;
}
chan->mode = opts.mode;
switch (chan->mode) {
case L2CAP_MODE_BASIC:
@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
return err;
}
static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk, *parent = data;
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
l2cap_chan_lock(chan);
__clear_chan_timer(chan);
l2cap_chan_close(chan, ECONNRESET);
l2cap_chan_unlock(chan);
l2cap_sock_kill(sk);
}
}
static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
{
struct sock *sk, *parent = chan->data;
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
return NULL;
}
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
GFP_ATOMIC);
@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
return l2cap_pi(sk)->chan;
}
static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
struct sock *sk = data;
struct sock *sk = chan->data;
struct l2cap_pinfo *pi = l2cap_pi(sk);
lock_sock(sk);
@ -925,16 +970,57 @@ static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
return err;
}
static void l2cap_sock_close_cb(void *data)
static void l2cap_sock_close_cb(struct l2cap_chan *chan)
{
struct sock *sk = data;
struct sock *sk = chan->data;
l2cap_sock_kill(sk);
}
static void l2cap_sock_state_change_cb(void *data, int state)
static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
{
struct sock *sk = data;
struct sock *sk = chan->data;
struct sock *parent;
lock_sock(sk);
parent = bt_sk(sk)->parent;
sock_set_flag(sk, SOCK_ZAPPED);
switch (chan->state) {
case BT_OPEN:
case BT_BOUND:
case BT_CLOSED:
break;
case BT_LISTEN:
l2cap_sock_cleanup_listen(sk);
sk->sk_state = BT_CLOSED;
chan->state = BT_CLOSED;
break;
default:
sk->sk_state = BT_CLOSED;
chan->state = BT_CLOSED;
sk->sk_err = err;
if (parent) {
bt_accept_unlink(sk);
parent->sk_data_ready(parent, 0);
} else {
sk->sk_state_change(sk);
}
break;
}
release_sock(sk);
}
static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
{
struct sock *sk = chan->data;
sk->sk_state = state;
}
@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
return skb;
}
static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
struct sock *parent;
lock_sock(sk);
parent = bt_sk(sk)->parent;
BT_DBG("sk %p, parent %p", sk, parent);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
if (parent)
parent->sk_data_ready(parent, 0);
release_sock(sk);
}
static struct l2cap_ops l2cap_chan_ops = {
.name = "L2CAP Socket Interface",
.new_connection = l2cap_sock_new_connection_cb,
.recv = l2cap_sock_recv_cb,
.close = l2cap_sock_close_cb,
.teardown = l2cap_sock_teardown_cb,
.state_change = l2cap_sock_state_change_cb,
.ready = l2cap_sock_ready_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
};
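The callback conversion above replaces the untyped void *data arguments with the channel pointer itself; each callback now recovers its socket from chan->data. A minimal standalone sketch of that typed ops-table pattern is below; all names in it are hypothetical, only the shape mirrors the diff.

/* Sketch of the typed-callback pattern used by l2cap_chan_ops above. */
#include <stdio.h>

struct chan;

struct chan_ops {
	const char *name;
	void (*ready)(struct chan *chan);
	void (*close)(struct chan *chan);
};

struct chan {
	const struct chan_ops *ops;
	void *data;               /* owner (e.g. a socket) hangs off here */
};

struct sock_stub { const char *label; };

static void sock_ready(struct chan *chan)
{
	struct sock_stub *sk = chan->data;  /* typed callback, untyped owner */
	printf("%s: ready\n", sk->label);
}

static void sock_close(struct chan *chan)
{
	struct sock_stub *sk = chan->data;
	printf("%s: closed\n", sk->label);
}

static const struct chan_ops sock_chan_ops = {
	.name  = "Socket Interface (sketch)",
	.ready = sock_ready,
	.close = sock_close,
};

int main(void)
{
	struct sock_stub sk = { .label = "l2cap-socket" };
	struct chan chan = { .ops = &sock_chan_ops, .data = &sk };

	chan.ops->ready(&chan);
	chan.ops->close(&chan);
	return 0;
}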

View File

@ -26,12 +26,7 @@
#define pr_fmt(fmt) "Bluetooth: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <asm/errno.h>
#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>

View File

@ -24,8 +24,6 @@
/* Bluetooth HCI Management interface */
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/unaligned.h>
@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
}
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
void (*cb)(struct pending_cmd *cmd, void *data),
void (*cb)(struct pending_cmd *cmd,
void *data),
void *data)
{
struct list_head *p, *n;
@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_BUSY);
goto failed;
@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
MGMT_STATUS_BUSY);
goto failed;
@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
scan = 0;
if (test_bit(HCI_ISCAN, &hdev->flags) &&
hdev->discov_timeout > 0)
hdev->discov_timeout > 0)
cancel_delayed_work(&hdev->discov_off);
}
@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
bool changed = false;
if (!!cp->val != test_bit(HCI_LINK_SECURITY,
&hdev->dev_flags)) {
&hdev->dev_flags)) {
change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
changed = true;
}
@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
u16 len)
{
struct mgmt_cp_remove_uuid *cp = data;
struct pending_cmd *cmd;
@ -1442,7 +1441,7 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
}
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
u16 len)
{
struct mgmt_cp_load_link_keys *cp = data;
u16 key_count, expected_len;
@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
sizeof(struct mgmt_link_key_info);
if (expected_len != len) {
BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
len, expected_len);
len, expected_len);
return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
MGMT_STATUS_INVALID_PARAMS);
}
BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
key_count);
key_count);
hci_dev_lock(hdev);
@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->disconnect) {
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
&cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
&cp->addr.bdaddr);
&cp->addr.bdaddr);
} else {
conn = NULL;
}
@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@ -1813,7 +1813,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
hdev->io_capability);
hdev->io_capability);
hci_dev_unlock(hdev);
@ -1821,7 +1821,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
0);
}
static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct pending_cmd *cmd;
@ -1873,6 +1873,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
pairing_complete(cmd, mgmt_status(status));
}
static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
{
struct pending_cmd *cmd;
BT_DBG("status %u", status);
if (!status)
return;
cmd = find_pairing(conn);
if (!cmd)
BT_DBG("Unable to find a pending command");
else
pairing_complete(cmd, mgmt_status(status));
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@ -1911,8 +1927,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (IS_ERR(conn)) {
int status;
if (PTR_ERR(conn) == -EBUSY)
status = MGMT_STATUS_BUSY;
else
status = MGMT_STATUS_CONNECT_FAILED;
err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
MGMT_STATUS_CONNECT_FAILED, &rp,
status, &rp,
sizeof(rp));
goto unlock;
}
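The hunk above distinguishes a busy connection attempt from a generic connect failure when translating the ERR_PTR value into a management status. A standalone sketch of that mapping follows; the numeric status values are placeholders, not the real MGMT_STATUS_* definitions.

/* Sketch of mapping a kernel-style error value to a management status,
 * as in the pair_device() hunk above. Status values are placeholders. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_BUSY            0x0a  /* placeholder */
#define STATUS_CONNECT_FAILED  0x04  /* placeholder */

static uint8_t status_from_err(long err)
{
	if (err == -EBUSY)
		return STATUS_BUSY;      /* another pairing is in progress */
	return STATUS_CONNECT_FAILED;    /* any other failure */
}

int main(void)
{
	printf("EBUSY -> 0x%02x\n", status_from_err(-EBUSY));
	printf("EIO   -> 0x%02x\n", status_from_err(-EIO));
	return 0;
}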
@ -1934,6 +1957,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
/* For LE, just connecting isn't a proof that the pairing finished */
if (cp->addr.type == BDADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
else
conn->connect_cfm_cb = le_connect_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
conn->disconn_cfm_cb = pairing_complete_cb;
@ -1941,7 +1966,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->user_data = conn;
if (conn->state == BT_CONNECTED &&
hci_conn_security(conn, sec_level, auth_type))
hci_conn_security(conn, sec_level, auth_type))
pairing_complete(cmd, 0);
err = 0;
@ -2238,7 +2263,7 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
}
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
void *data, u16 len)
{
struct mgmt_cp_remove_remote_oob_data *cp = data;
u8 status;
@ -2407,7 +2432,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
case DISCOVERY_RESOLVING:
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
NAME_PENDING);
NAME_PENDING);
if (!e) {
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id,
@ -2629,7 +2654,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
sizeof(struct mgmt_ltk_info);
if (expected_len != len) {
BT_ERR("load_keys: expected %u bytes, got %u bytes",
len, expected_len);
len, expected_len);
return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
EINVAL);
}
@ -2754,7 +2779,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
mgmt_handlers[opcode].func == NULL) {
mgmt_handlers[opcode].func == NULL) {
BT_DBG("Unknown op %u", opcode);
err = cmd_status(sk, index, opcode,
MGMT_STATUS_UNKNOWN_COMMAND);
@ -2762,7 +2787,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
if ((hdev && opcode < MGMT_OP_READ_INFO) ||
(!hdev && opcode >= MGMT_OP_READ_INFO)) {
(!hdev && opcode >= MGMT_OP_READ_INFO)) {
err = cmd_status(sk, index, opcode,
MGMT_STATUS_INVALID_INDEX);
goto done;
@ -2771,7 +2796,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
handler = &mgmt_handlers[opcode];
if ((handler->var_len && len < handler->data_len) ||
(!handler->var_len && len != handler->data_len)) {
(!handler->var_len && len != handler->data_len)) {
err = cmd_status(sk, index, opcode,
MGMT_STATUS_INVALID_PARAMS);
goto done;
@ -2955,7 +2980,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@ -3090,7 +3115,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_remove(cmd);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
hdev);
return err;
}
@ -3180,7 +3205,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type)
u8 link_type, u8 addr_type)
{
struct mgmt_ev_user_passkey_request ev;
@ -3194,8 +3219,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status,
u8 opcode)
u8 link_type, u8 addr_type, u8 status,
u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
@ -3226,7 +3251,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status)
{
return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
status,
MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@ -3240,7 +3266,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status)
{
return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
status,
MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,

View File

@ -26,22 +26,8 @@
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/net.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
static inline void rfcomm_schedule(void)
static void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
wake_up_process(rfcomm_thread);
}
static inline void rfcomm_session_put(struct rfcomm_session *s)
static void rfcomm_session_put(struct rfcomm_session *s)
{
if (atomic_dec_and_test(&s->refcnt))
rfcomm_session_del(s);
@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
return err;
}
static inline int rfcomm_check_security(struct rfcomm_dlc *d)
static int rfcomm_check_security(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
/* Send data queued for the DLC.
* Return number of frames left in the queue.
*/
static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
static int rfcomm_process_tx(struct rfcomm_dlc *d)
{
struct sk_buff *skb;
int err;
@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
return skb_queue_len(&d->tx_queue);
}
static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
static void rfcomm_process_dlcs(struct rfcomm_session *s)
{
struct rfcomm_dlc *d;
struct list_head *p, *n;
@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
}
}
static inline void rfcomm_process_rx(struct rfcomm_session *s)
static void rfcomm_process_rx(struct rfcomm_session *s)
{
struct socket *sock = s->sock;
struct sock *sk = sock->sk;
@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
}
}
static inline void rfcomm_accept_connection(struct rfcomm_session *s)
static void rfcomm_accept_connection(struct rfcomm_session *s)
{
struct socket *sock = s->sock, *nsock;
int err;
@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
sock_release(nsock);
}
static inline void rfcomm_check_connection(struct rfcomm_session *s)
static void rfcomm_check_connection(struct rfcomm_session *s)
{
struct sock *sk = s->sock->sk;
@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
}
}
static inline void rfcomm_process_sessions(void)
static void rfcomm_process_sessions(void)
{
struct list_head *p, *n;

View File

@ -25,27 +25,8 @@
* RFCOMM sockets.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

View File

@ -31,11 +31,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/rfcomm.h>
@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
return NULL;
}
static inline struct rfcomm_dev *rfcomm_dev_get(int id)
static struct rfcomm_dev *rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
tty_port_put(&dev->port);
}
static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
{
tty_port_get(&dev->port);
atomic_add(skb->truesize, &dev->wmem_alloc);

View File

@ -25,26 +25,8 @@
/* Bluetooth SCO sockets. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/security.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
return conn;
}
static inline struct sock *sco_chan_get(struct sco_conn *conn)
static struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
return 0;
}
static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
struct sock *parent)
{
int err = 0;
@ -228,7 +211,7 @@ static int sco_connect(struct sock *sk)
return err;
}
static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
return len;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
sk->sk_lingertime);
}
release_sock(sk);
return err;
@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
BTPROTO_SCO, GFP_ATOMIC);
BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
@ -907,7 +890,7 @@ static void sco_conn_ready(struct sco_conn *conn)
/* ----- SCO interface with lower layer (HCI) ----- */
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
register struct sock *sk;
struct sock *sk;
struct hlist_node *node;
int lm = 0;
@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
break;
}
@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst), sk->sk_state);
batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock(&sco_sk_list.lock);
@ -1044,8 +1027,8 @@ int __init sco_init(void)
}
if (bt_debugfs) {
sco_debugfs = debugfs_create_file("sco", 0444,
bt_debugfs, NULL, &sco_debugfs_fops);
sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}

View File

@ -20,14 +20,15 @@
SOFTWARE IS DISCLAIMED.
*/
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/b128ops.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/b128ops.h>
#define SMP_TIMEOUT msecs_to_jiffies(30000)
@ -648,7 +649,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
if (ret)
return SMP_UNSPECIFIED;
@ -703,7 +704,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
{
struct smp_ltk *key;
struct hci_conn *hcon = conn->hcon;
@ -712,6 +713,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
if (!key)
return 0;
if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
return 0;
if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
return 1;
@ -732,7 +736,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
if (smp_ltk_encrypt(conn))
if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
return 0;
if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@ -771,7 +775,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
return 1;
if (hcon->link_mode & HCI_LM_MASTER)
if (smp_ltk_encrypt(conn))
if (smp_ltk_encrypt(conn, sec_level))
goto done;
if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
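The extra sec_level argument above makes the LTK shortcut refuse to start encryption with an unauthenticated key when anything above medium security is requested. A standalone sketch of that gate follows; the security-level ordering is an assumption matching BT_SECURITY_LOW < BT_SECURITY_MEDIUM < BT_SECURITY_HIGH.

/* Sketch of the authenticated-key gate added to smp_ltk_encrypt() above. */
#include <stdbool.h>
#include <stdio.h>

enum sec_level { SEC_LOW, SEC_MEDIUM, SEC_HIGH };

struct ltk {
	bool authenticated;  /* key came from an MITM-protected pairing */
};

/* Return true if the stored key may be used for the requested level. */
static bool ltk_usable(const struct ltk *key, enum sec_level sec_level)
{
	if (!key)
		return false;
	if (sec_level > SEC_MEDIUM && !key->authenticated)
		return false;  /* high security needs an authenticated key */
	return true;
}

int main(void)
{
	struct ltk jw = { .authenticated = false };

	printf("medium with unauthenticated key: %d\n", ltk_usable(&jw, SEC_MEDIUM)); /* 1 */
	printf("high   with unauthenticated key: %d\n", ltk_usable(&jw, SEC_HIGH));   /* 0 */
	return 0;
}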

View File

@ -2097,6 +2097,9 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
int i, ret;
if (!ieee80211_sdata_running(sdata))
return -ENETDOWN;
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
ret = drv_set_bitrate_mask(local, sdata, mask);
if (ret)

View File

@ -1337,6 +1337,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->associated))
return;
ieee80211_stop_poll(sdata);
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
ifmgd->associated = NULL;
@ -2592,8 +2594,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[DEAUTH_DISASSOC_LEN];
ieee80211_stop_poll(sdata);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
false, frame_buf);
mutex_unlock(&ifmgd->mtx);

View File

@ -271,6 +271,9 @@ struct sta_ampdu_mlme {
* @plink_timer: peer link watch timer
* @plink_timer_was_running: used by suspend/resume to restore timers
* @t_offset: timing offset relative to this host
* @t_offset_setpoint: reference timing offset of this sta to be used when
* calculating clockdrift
* @ch_type: peer's channel type
* @debugfs: debug filesystem info
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
@ -278,6 +281,8 @@ struct sta_ampdu_mlme {
* @sta: station information we share with the driver
* @sta_state: duplicates information about station state (for debug)
* @beacon_loss_count: number of times beacon loss has triggered
* @supports_40mhz: tracks whether the station advertised 40 MHz support
* as we overwrite its HT parameters with the currently used value
*/
struct sta_info {
/* General information, mostly static */

View File

@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void)
spin_unlock(&reg_requests_lock);
if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
cancel_delayed_work_sync(&reg_timeout);
cancel_delayed_work(&reg_timeout);
if (need_more_processing)
schedule_work(&reg_work);

View File

@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
ntype == NL80211_IFTYPE_P2P_CLIENT))
return -EBUSY;
if (ntype != otype) {
if (ntype != otype && netif_running(dev)) {
err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
ntype);
if (err)
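The final hunk narrows the interface-type change validation so the combination check is only consulted while the netdev is actually running; a stopped interface may switch type without it. A standalone sketch of that guard follows, using stand-in types and a hypothetical can_change() check rather than the cfg80211 API.

/* Sketch of the guard added to cfg80211_change_iface() above: the
 * combination check only applies while the device is running. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum iftype { IFTYPE_STATION, IFTYPE_AP, IFTYPE_MONITOR };

struct netdev {
	bool running;
	enum iftype type;
};

/* Hypothetical combination check; pretend AP is never allowed here. */
static int can_change(enum iftype ntype)
{
	return ntype == IFTYPE_AP ? -EBUSY : 0;
}

static int change_iface(struct netdev *dev, enum iftype ntype)
{
	if (ntype != dev->type && dev->running) {
		int err = can_change(ntype);
		if (err)
			return err;      /* running and combination rejected */
	}
	dev->type = ntype;               /* stopped interfaces switch freely */
	return 0;
}

int main(void)
{
	struct netdev up   = { .running = true,  .type = IFTYPE_STATION };
	struct netdev down = { .running = false, .type = IFTYPE_STATION };

	printf("up   -> AP: %d\n", change_iface(&up, IFTYPE_AP));   /* -EBUSY */
	printf("down -> AP: %d\n", change_iface(&down, IFTYPE_AP)); /* 0 */
	return 0;
}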