/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
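
/* Both knobs are 0644 so they can be inspected and changed via sysfs, but
 * a change only takes effect the next time the interrupt or reset path is
 * set up. A typical use, e.g. forcing legacy interrupts at module load:
 *
 *   modprobe ath10k_pci irq_mode=1
 */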

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
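
/* Note on the table above: src_nentries/.send_cb describe the host->target
 * direction of a pipe, dest_nentries/.recv_cb the target->host direction.
 * CE4 (HTT tx) is created with CE_ATTR_DIS_INTR, so its send completions
 * are not interrupt driven; the HTT rx callbacks below poll it explicitly
 * via ath10k_ce_per_engine_service(ar, 4).
 */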

/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It is not necessary to send target wlan configuration for CE10 and
	 * CE11 as these CEs are not actively used in target.
	 */
};
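
/* Both this table and target_service_to_ce_map_wlan below keep their fields
 * in __le32 because they are handed to the target at startup as-is rather
 * than being interpreted by the host, so host byte order must not leak in.
 */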

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}
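
/* The wakeup poll above backs off linearly: the delay starts at 5 us and
 * grows by 5 us per iteration up to 50 us, until the accumulated wait hits
 * PCIE_WAKE_TIMEOUT. This keeps the common fast wakeup cheap while bounding
 * the busy-wait cost of a slow one.
 */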

static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
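
/* Summary of the power-save scheme above: register accessors bracket MMIO
 * with ath10k_pci_wake()/ath10k_pci_sleep(). ps_wake_refcount counts nested
 * wakers; when it drops to zero the chip is not put to sleep immediately,
 * instead ps_timer is armed with ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC so
 * bursts of accesses do not bounce the SoC in and out of sleep.
 * ath10k_pci_sleep_sync() forces the final transition at teardown.
 */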

static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ar_pci->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ar_pci->bus_ops->read32(ar, offset);
}
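
/* Register access funnels through ar_pci->bus_ops so that non-PCI buses
 * (e.g. the AHB glue used by QCA4019, which reuses most of pci.c/ce.c) can
 * supply their own low-level accessors while sharing the logic in this file.
 */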

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after INTR_ENABLE is
	 * set to 0, otherwise the interrupt cannot really be cleared.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
		return "msi";

	return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ar_pci->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ar_pci->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ar_pci->ce_lock);

	while (num >= 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					 CORE_CTRL_ADDRESS) &
		       0x7ff) << 21;
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA4019:
		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
		break;
	}

	val |= 0x100000 | (addr & 0xfffff);
	return val;
}
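
/* Worked example (hypothetical register value): on QCA988X, if CORE_CTRL
 * reads 0x4e the window base is (0x4e & 0x7ff) << 21 = 0x09c00000, so
 * target CPU address 0x0040a000 becomes
 * 0x09c00000 | 0x100000 | (0x0040a000 & 0xfffff) = 0x09d0a000
 * in CE address space.
 */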

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       alloc_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, alloc_nbytes);

	remaining_bytes = nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;

		if (ret) {
			ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
				    address, ret);
			break;
		}

		memcpy(data, data_buf, nbytes);

		address += nbytes;
		data += nbytes;
	}

done:

	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
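
/* The wrapper macro lets callers name a host_interest member directly;
 * HI_ITEM() turns it into the firmware-side offset. For example, the crash
 * handler below fetches the failure state with:
 *
 *   ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], hi_failure_state,
 *			     REG_DUMP_COUNT_QCA988X * sizeof(__le32));
 */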

int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
			      const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/* let device gain the buffer again */
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}
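
/* Unlike ath10k_pci_process_rx_cb() above, this HTT fast path never unmaps
 * or reallocates buffers: each skb is synced for the CPU, delivered, trimmed
 * back to empty and synced for the device again so the very same buffer can
 * be handed back to CE5, with the ring write index updated once per batch
 * via ath10k_ce_rx_update_write_idx().
 */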

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}
|
|
|
|
|
2016-01-27 16:54:22 +07:00
|
|
|
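/*
 * Note on ath10k_pci_hif_tx_sg() above: every fragment except the last is
 * posted with CE_SEND_FLAG_GATHER, so the copy engine treats the whole
 * scatter list as one logical transfer that completes with the final,
 * flag-less send. On any failure the descriptors already posted under the
 * lock are unwound with __ath10k_ce_send_revert() before unlocking.
 */
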
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

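/*
 * A worked example of the 50% heuristic above: for a pipe configured with,
 * say, 64 source ring entries the threshold is 32, so the (relatively
 * expensive) CE register poll is skipped as long as more than 32 send
 * slots are still free, and completions are only reaped once at least half
 * of the ring is in flight.
 */
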
void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_kill(&ar_pci->intr_tq);

	del_timer_sync(&ar_pci->rx_post_retry);
}

int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}

static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to mask irq/MSI.
		 */
		break;
	}
}

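/*
 * CORE_CTRL_PCIE_REG_31 appears to gate the firmware-indication interrupt
 * towards the host; clearing it above and setting it again in
 * ath10k_pci_irq_msi_fw_unmask() below is the only MSI-level masking the
 * older chips provide (see also the comment in ath10k_pci_hif_stop()).
 */
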
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to unmask irq/MSI.
		 */
		break;
	}
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	synchronize_irq(ar_pci->pdev->irq);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However,
	 * regardless of how many MSI interrupts are assigned, the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt, reset
	 * it before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

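/*
 * BMI exchange in brief: the request is bounce-buffered and DMA-mapped, an
 * optional response buffer is posted on the BMI rx pipe first, the request
 * is then sent on the tx pipe, and both pipes are polled synchronously via
 * ath10k_pci_bmi_wait(). Polling rather than interrupts is used because
 * BMI runs during early boot, before the HIF interrupt path is enabled.
 */
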
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	unsigned int nbytes;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}

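/*
 * The loop above is a cooperative busy-poll: each pass reaps tx and rx
 * completions straight off the copy engine rings and then yields with
 * schedule(), so a BMI command may take up to BMI_COMMUNICATION_TIMEOUT_HZ
 * jiffies before -ETIMEDOUT is declared.
 */
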
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}

static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 2;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}

static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ar_pci->bus_ops->get_num_banks(ar);
}

int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

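/*
 * All of the accesses in ath10k_pci_init_config() go through the
 * diagnostic window rather than the normal data pipes: the target firmware
 * has not booted yet, so the host patches target-resident state (pipe
 * config, service map, early-alloc and option flags) by direct
 * read-modify-write of target memory and finally sets
 * HI_OPTION_EARLY_CFG_DONE to let the target proceed.
 */
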
static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &host_ce_config_wlan[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &target_ce_config_wlan[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}

int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ar_pci->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}

void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

int ath10k_pci_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

static bool ath10k_pci_has_device_gone(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	return (val == 0xffffffff);
}

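/*
 * Reads from a device that has dropped off the bus (surprise removal, bus
 * error) complete as all-ones on PCI, so FW_INDICATOR_ADDRESS coming back
 * as 0xffffffff above is taken to mean "device gone" rather than a genuine
 * register value.
 */
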
/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}

static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}

static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if
	 * it were to access the copy engine while the host performs copy
	 * engine reset it could confuse the PCIe controller to the point of
	 * bringing the host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->pci_soft_reset)
		return -ENOTSUPP;

	return ar_pci->pci_soft_reset(ar);
}

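/*
 * pci_soft_reset and pci_hard_reset are per-chip function pointers,
 * presumably assigned at probe time: the "safe" variant must not risk
 * locking up the host even with firmware in an unknown state (it is called
 * from ath10k_pci_hif_stop()), while ath10k_pci_chip_reset() further below
 * performs the full reset used on power-up.
 */
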
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON(!ar_pci->pci_hard_reset))
		return -ENOTSUPP;

	return ar_pci->pci_hard_reset(ar);
}

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}

void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}

#ifdef CONFIG_PM

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}
#endif

static bool ath10k_pci_validate_cal(void *data, size_t size)
{
	__le16 *cal_words = data;
	u16 checksum = 0;
	size_t i;

	if (size % 2 != 0)
		return false;

	for (i = 0; i < size / 2; i++)
		checksum ^= le16_to_cpu(cal_words[i]);

	return checksum == 0xffff;
}

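/*
 * The calibration checksum above is a plain 16-bit XOR that must come out
 * to 0xffff over the whole blob, checksum word included. Illustration with
 * a hypothetical two-word blob: { 0x1234, 0xedcb } validates since
 * 0x1234 ^ 0xedcb == 0xffff.
 */
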
static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
	/* Enable SI clock */
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

	/* Configure GPIOs for I2C operation */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}

static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

	/* set the device select byte and the read operation */
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

	/* write transmit data, transfer length, and START bit */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

	/* wait max 1 sec */
	wait_limit = 100000;

	/* wait for SI_CS_DONE_INT */
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

	/* clear SI_CS_DONE_INT */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

	/* extract receive data */
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}

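/*
 * The completion poll in ath10k_pci_read_eeprom() allows roughly one
 * second per byte (100000 iterations x udelay(10)) before timing out, and
 * the fetch below reads the calibration data one byte at a time, so a slow
 * or absent EEPROM fails with an error rather than wedging the boot path.
 */
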
static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
					   size_t *data_len)
{
	u8 *caldata = NULL;
	size_t calsize, i;
	int ret;

	if (!QCA_REV_9887(ar))
		return -EOPNOTSUPP;

	calsize = ar->hw_params.cal_data_len;
	caldata = kmalloc(calsize, GFP_KERNEL);
	if (!caldata)
		return -ENOMEM;

	ath10k_pci_enable_eeprom(ar);

	for (i = 0; i < calsize; i++) {
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.diag_read = ath10k_pci_hif_diag_read,
	.diag_write = ath10k_pci_diag_write_mem,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
	.read32 = ath10k_pci_read32,
	.write32 = ath10k_pci_write32,
#ifdef CONFIG_PM
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
#endif
	.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
		ath10k_pci_enable_legacy_irq(ar);
}
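
/*
 * Note on the split above: the hard interrupt handler only wakes the
 * device, masks/acks the (legacy) interrupt and schedules the tasklet;
 * all copy-engine servicing happens in tasklet context, which re-enables
 * the legacy interrupt once it is done.
 */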

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
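
/*
 * The MSI and legacy request paths above are intentionally identical apart
 * from the warning text: both register ath10k_pci_interrupt_handler() on a
 * shared line, and the handler itself checks oper_irq_mode to decide
 * whether legacy acknowledgement is needed.
 */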

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		return ath10k_pci_request_irq_legacy(ar);
	case ATH10K_PCI_IRQ_MSI:
		return ath10k_pci_request_irq_msi(ar);
	default:
		return -EINVAL;
	}
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	free_irq(ar_pci->pdev->irq, ar);
}

void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
}

static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: the CORE_BASE write depends on the
	 * target correctly decoding the AXI address, but the host won't know
	 * when the target writes BAR to CORE_CTRL, so this write might get
	 * lost if the target has NOT written BAR yet. For now, fix the race
	 * by repeating the write in the synchronization checking below.
	 */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
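
/*
 * With the default auto mode the function above tries MSI first and falls
 * back to legacy interrupts. The mode can be forced from userspace via the
 * irq_mode module parameter declared earlier in this file, e.g.
 * (illustrative shell usage):
 *
 *	modprobe ath10k_pci irq_mode=1
 *
 * which skips MSI entirely and arms the legacy PCIE_INTR_ENABLE path.
 */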

static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		ath10k_pci_deinit_irq_legacy(ar);
		break;
	default:
		pci_disable_msi(ar_pci->pdev);
		break;
	}

	return 0;
}

int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}
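
/*
 * The loop above distinguishes three terminal states read back from
 * FW_INDICATOR_ADDRESS: an all-ones value means the device dropped off the
 * bus (-EIO), FW_IND_EVENT_PENDING means the firmware crashed during init
 * (-ECOMM), and only FW_IND_INITIALIZED within the ATH10K_PCI_TARGET_WAIT
 * budget counts as success; anything else is a timeout (-ETIMEDOUT).
 */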

static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing SOC_GLOBAL_RESET to put the device into reset (and
	 * again when pulling it out of reset), PCIe may not be stable for
	 * immediate register access and can return bus errors, so delay
	 * before any further PCIe access.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}

static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}
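
/*
 * ath10k_pci_release() below is the exact mirror of the claim above and
 * undoes the steps in reverse order: iounmap, release the BAR region,
 * drop bus mastering and disable the device.
 */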

static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}
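
/*
 * The chip id register encodes a silicon revision in the SOC_CHIP_ID_REV
 * field; a (dev_id, rev_id) pair is accepted only if it appears in the
 * ath10k_pci_supp_chips[] table, which is how unsupported silicon revisions
 * are rejected during probe.
 */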

int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_init(&ar_pci->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);

	if (QCA_REV_6174(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

void ath10k_pci_release_resource(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
}

static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32 = ath10k_bus_pci_read32,
	.write32 = ath10k_bus_pci_write32,
	.get_num_banks = ath10k_pci_get_num_banks,
};

static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;
	bool pci_ps;
	int (*pci_soft_reset)(struct ath10k *ar);
	int (*pci_hard_reset)(struct ath10k *ar);

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		break;
	case QCA9887_1_0_DEVICE_ID:
		dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
		hw_rev = ATH10K_HW_QCA9887;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		break;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		break;
	case QCA9984_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9984;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		break;
	case QCA9377_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9377;
		pci_ps = true;
		pci_soft_reset = NULL;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;
	ar->dev_id = pci_dev->device;
	ar_pci->pci_ps = pci_ps;
	ar_pci->bus_ops = &ath10k_pci_bus_ops;
	ar_pci->pci_soft_reset = pci_soft_reset;
	ar_pci->pci_hard_reset = pci_hard_reset;

	ar->id.vendor = pdev->vendor;
	ar->id.device = pdev->device;
	ar->id.subsystem_vendor = pdev->subsystem_vendor;
	ar->id.subsystem_device = pdev->subsystem_device;

	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
		    (unsigned long)ar);

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_free_pipes;
	}

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_sleep;
	}

	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		/* don't return 0 from probe after tearing everything down */
		ret = -ENODEV;
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
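
/*
 * Probe order matters: resources (locks, timers, CE pipes) are set up
 * before the device is claimed, interrupts come up before the chip reset
 * that relies on them, and the chip id is validated before the core is
 * registered. The err_* labels unwind in exactly the reverse order.
 */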

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	if (!ar)
		return;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_release_resource(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	ret = ath10k_ahb_init();
	if (ret)
		printk(KERN_ERR "ahb init failed: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);