/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
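
/* Example (illustrative): both values above are module parameters of the
 * ath10k PCI module, so a specific interrupt or reset mode can be forced at
 * load time, e.g.:
 *
 *	modprobe ath10k_pci irq_mode=1 reset_mode=1
 *
 * selects legacy interrupts and warm-only reset; 0 keeps the auto defaults.
 */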
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
};
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs > 1)
		return "msi-x";

	if (ar_pci->num_msi_intrs == 1)
		return "msi";

	return "legacy";
}
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
|
|
{
|
|
struct ath10k *ar = pipe->hif_ce_state;
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
|
|
struct sk_buff *skb;
|
|
dma_addr_t paddr;
|
|
int ret;
|
|
|
|
lockdep_assert_held(&ar_pci->ce_lock);
|
|
|
|
skb = dev_alloc_skb(pipe->buf_sz);
|
|
if (!skb)
|
|
return -ENOMEM;
|
|
|
|
WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
|
|
|
|
paddr = dma_map_single(ar->dev, skb->data,
|
|
skb->len + skb_tailroom(skb),
|
|
DMA_FROM_DEVICE);
|
|
if (unlikely(dma_mapping_error(ar->dev, paddr))) {
|
|
ath10k_warn(ar, "failed to dma map pci rx buf\n");
|
|
dev_kfree_skb_any(skb);
|
|
return -EIO;
|
|
}
|
|
|
|
ATH10K_SKB_CB(skb)->paddr = paddr;
|
|
|
|
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
|
|
if (ret) {
|
|
ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
|
|
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
|
|
DMA_FROM_DEVICE);
|
|
dev_kfree_skb_any(skb);
|
|
return ret;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
|
|
{
|
|
struct ath10k *ar = pipe->hif_ce_state;
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
|
|
int ret, num;
|
|
|
|
lockdep_assert_held(&ar_pci->ce_lock);
|
|
|
|
if (pipe->buf_sz == 0)
|
|
return;
|
|
|
|
if (!ce_pipe->dest_ring)
|
|
return;
|
|
|
|
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
|
|
while (num--) {
|
|
ret = __ath10k_pci_rx_post_buf(pipe);
|
|
if (ret) {
|
|
ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
|
|
mod_timer(&ar_pci->rx_post_retry, jiffies +
|
|
ATH10K_PCI_RX_POST_RETRY_MS);
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
|
|
{
|
|
struct ath10k *ar = pipe->hif_ce_state;
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
|
|
spin_lock_bh(&ar_pci->ce_lock);
|
|
__ath10k_pci_rx_post_pipe(pipe);
|
|
spin_unlock_bh(&ar_pci->ce_lock);
|
|
}
|
|
|
|
static void ath10k_pci_rx_post(struct ath10k *ar)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
int i;
|
|
|
|
spin_lock_bh(&ar_pci->ce_lock);
|
|
for (i = 0; i < CE_COUNT; i++)
|
|
__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
|
|
spin_unlock_bh(&ar_pci->ce_lock);
|
|
}
|
|
|
|
static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
|
|
{
|
|
struct ath10k *ar = (void *)ptr;
|
|
|
|
ath10k_pci_rx_post(ar);
|
|
}
|
|
|
|
/*
|
|
* Diagnostic read/write access is provided for startup/config/debug usage.
|
|
* Caller must guarantee proper alignment, when applicable, and single user
|
|
* at any moment.
|
|
*/
|
|
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
|
|
int nbytes)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
int ret = 0;
|
|
u32 buf;
|
|
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
|
|
unsigned int id;
|
|
unsigned int flags;
|
|
struct ath10k_ce_pipe *ce_diag;
|
|
/* Host buffer address in CE space */
|
|
u32 ce_data;
|
|
dma_addr_t ce_data_base = 0;
|
|
void *data_buf = NULL;
|
|
int i;
|
|
|
|
spin_lock_bh(&ar_pci->ce_lock);
|
|
|
|
ce_diag = ar_pci->ce_diag;
|
|
|
|
/*
|
|
* Allocate a temporary bounce buffer to hold caller's data
|
|
* to be DMA'ed from Target. This guarantees
|
|
* 1) 4-byte alignment
|
|
* 2) Buffer in DMA-able space
|
|
*/
|
|
orig_nbytes = nbytes;
|
|
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
|
|
orig_nbytes,
|
|
&ce_data_base,
|
|
GFP_ATOMIC);
|
|
|
|
if (!data_buf) {
|
|
ret = -ENOMEM;
|
|
goto done;
|
|
}
|
|
memset(data_buf, 0, orig_nbytes);
|
|
|
|
remaining_bytes = orig_nbytes;
|
|
ce_data = ce_data_base;
|
|
while (remaining_bytes) {
|
|
nbytes = min_t(unsigned int, remaining_bytes,
|
|
DIAG_TRANSFER_LIMIT);
|
|
|
|
ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
|
|
if (ret != 0)
|
|
goto done;
|
|
|
|
/* Request CE to send from Target(!) address to Host buffer */
|
|
/*
|
|
* The address supplied by the caller is in the
|
|
* Target CPU virtual address space.
|
|
*
|
|
* In order to use this address with the diagnostic CE,
|
|
* convert it from Target CPU virtual address space
|
|
* to CE address space
|
|
*/
|
|
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
|
|
address);
|
|
|
|
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
|
|
0);
|
|
if (ret)
|
|
goto done;
|
|
|
|
i = 0;
|
|
while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
|
|
&completed_nbytes,
|
|
&id) != 0) {
|
|
mdelay(1);
|
|
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
|
|
ret = -EBUSY;
|
|
goto done;
|
|
}
|
|
}
|
|
|
|
if (nbytes != completed_nbytes) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
if (buf != (u32)address) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
i = 0;
|
|
while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
|
|
&completed_nbytes,
|
|
&id, &flags) != 0) {
|
|
mdelay(1);
|
|
|
|
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
|
|
ret = -EBUSY;
|
|
goto done;
|
|
}
|
|
}
|
|
|
|
if (nbytes != completed_nbytes) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
if (buf != ce_data) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
remaining_bytes -= nbytes;
|
|
address += nbytes;
|
|
ce_data += nbytes;
|
|
}
|
|
|
|
done:
|
|
if (ret == 0)
|
|
memcpy(data, data_buf, orig_nbytes);
|
|
else
|
|
ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
|
|
address, ret);
|
|
|
|
if (data_buf)
|
|
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
|
|
ce_data_base);
|
|
|
|
spin_unlock_bh(&ar_pci->ce_lock);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
|
|
{
|
|
__le32 val = 0;
|
|
int ret;
|
|
|
|
ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
|
|
*value = __le32_to_cpu(val);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
|
|
u32 src, u32 len)
|
|
{
|
|
u32 host_addr, addr;
|
|
int ret;
|
|
|
|
host_addr = host_interest_item_address(src);
|
|
|
|
ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
|
|
if (ret != 0) {
|
|
ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
|
|
src, ret);
|
|
return ret;
|
|
}
|
|
|
|
ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
|
|
if (ret != 0) {
|
|
ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
|
|
addr, len, ret);
|
|
return ret;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
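
/* Example (illustrative): the macro takes the bare host_interest field name,
 * not an address, so reading e.g. the firmware's board data pointer would
 * look roughly like:
 *
 *	u32 board_data;
 *	ret = ath10k_pci_diag_read_hi(ar, &board_data,
 *				      hi_board_data, sizeof(board_data));
 */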
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
|
|
const void *data, int nbytes)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
int ret = 0;
|
|
u32 buf;
|
|
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
|
|
unsigned int id;
|
|
unsigned int flags;
|
|
struct ath10k_ce_pipe *ce_diag;
|
|
void *data_buf = NULL;
|
|
u32 ce_data; /* Host buffer address in CE space */
|
|
dma_addr_t ce_data_base = 0;
|
|
int i;
|
|
|
|
spin_lock_bh(&ar_pci->ce_lock);
|
|
|
|
ce_diag = ar_pci->ce_diag;
|
|
|
|
/*
|
|
* Allocate a temporary bounce buffer to hold caller's data
|
|
* to be DMA'ed to Target. This guarantees
|
|
* 1) 4-byte alignment
|
|
* 2) Buffer in DMA-able space
|
|
*/
|
|
orig_nbytes = nbytes;
|
|
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
|
|
orig_nbytes,
|
|
&ce_data_base,
|
|
GFP_ATOMIC);
|
|
if (!data_buf) {
|
|
ret = -ENOMEM;
|
|
goto done;
|
|
}
|
|
|
|
/* Copy caller's data to allocated DMA buf */
|
|
memcpy(data_buf, data, orig_nbytes);
|
|
|
|
/*
|
|
* The address supplied by the caller is in the
|
|
* Target CPU virtual address space.
|
|
*
|
|
* In order to use this address with the diagnostic CE,
|
|
* convert it from
|
|
* Target CPU virtual address space
|
|
* to
|
|
* CE address space
|
|
*/
|
|
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
|
|
|
|
remaining_bytes = orig_nbytes;
|
|
ce_data = ce_data_base;
|
|
while (remaining_bytes) {
|
|
/* FIXME: check cast */
|
|
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
|
|
|
|
/* Set up to receive directly into Target(!) address */
|
|
ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
|
|
if (ret != 0)
|
|
goto done;
|
|
|
|
/*
|
|
* Request CE to send caller-supplied data that
|
|
* was copied to bounce buffer to Target(!) address.
|
|
*/
|
|
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
|
|
nbytes, 0, 0);
|
|
if (ret != 0)
|
|
goto done;
|
|
|
|
i = 0;
|
|
while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
|
|
&completed_nbytes,
|
|
&id) != 0) {
|
|
mdelay(1);
|
|
|
|
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
|
|
ret = -EBUSY;
|
|
goto done;
|
|
}
|
|
}
|
|
|
|
if (nbytes != completed_nbytes) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
if (buf != ce_data) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
i = 0;
|
|
while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
|
|
&completed_nbytes,
|
|
&id, &flags) != 0) {
|
|
mdelay(1);
|
|
|
|
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
|
|
ret = -EBUSY;
|
|
goto done;
|
|
}
|
|
}
|
|
|
|
if (nbytes != completed_nbytes) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
if (buf != address) {
|
|
ret = -EIO;
|
|
goto done;
|
|
}
|
|
|
|
remaining_bytes -= nbytes;
|
|
address += nbytes;
|
|
ce_data += nbytes;
|
|
}
|
|
|
|
done:
|
|
if (data_buf) {
|
|
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
|
|
ce_data_base);
|
|
}
|
|
|
|
if (ret != 0)
|
|
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
|
|
address, ret);
|
|
|
|
spin_unlock_bh(&ar_pci->ce_lock);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
|
|
{
|
|
__le32 val = __cpu_to_le32(value);
|
|
|
|
return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
|
|
}
|
|
|
|
static bool ath10k_pci_is_awake(struct ath10k *ar)
|
|
{
|
|
u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
|
|
|
|
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
|
|
}
|
|
|
|
static int ath10k_pci_wake_wait(struct ath10k *ar)
|
|
{
|
|
int tot_delay = 0;
|
|
int curr_delay = 5;
|
|
|
|
while (tot_delay < PCIE_WAKE_TIMEOUT) {
|
|
if (ath10k_pci_is_awake(ar))
|
|
return 0;
|
|
|
|
udelay(curr_delay);
|
|
tot_delay += curr_delay;
|
|
|
|
if (curr_delay < 50)
|
|
curr_delay += 5;
|
|
}
|
|
|
|
return -ETIMEDOUT;
|
|
}
|
|
|
|
static int ath10k_pci_wake(struct ath10k *ar)
|
|
{
|
|
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
|
|
PCIE_SOC_WAKE_V_MASK);
|
|
return ath10k_pci_wake_wait(ar);
|
|
}
|
|
|
|
static void ath10k_pci_sleep(struct ath10k *ar)
|
|
{
|
|
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
|
|
PCIE_SOC_WAKE_RESET);
|
|
}
|
|
|
|
/* Called by lower (CE) layer when a send to Target completes. */
|
|
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
|
|
{
|
|
struct ath10k *ar = ce_state->ar;
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
|
|
void *transfer_context;
|
|
u32 ce_data;
|
|
unsigned int nbytes;
|
|
unsigned int transfer_id;
|
|
|
|
while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
|
|
&ce_data, &nbytes,
|
|
&transfer_id) == 0) {
|
|
/* no need to call tx completion for NULL pointers */
|
|
if (transfer_context == NULL)
|
|
continue;
|
|
|
|
cb->tx_completion(ar, transfer_context, transfer_id);
|
|
}
|
|
}
|
|
|
|
/* Called by lower (CE) layer when data is received from the Target. */
|
|
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
|
|
{
|
|
struct ath10k *ar = ce_state->ar;
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
|
|
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
|
|
struct sk_buff *skb;
|
|
void *transfer_context;
|
|
u32 ce_data;
|
|
unsigned int nbytes, max_nbytes;
|
|
unsigned int transfer_id;
|
|
unsigned int flags;
|
|
|
|
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
|
|
&ce_data, &nbytes, &transfer_id,
|
|
&flags) == 0) {
|
|
skb = transfer_context;
|
|
max_nbytes = skb->len + skb_tailroom(skb);
|
|
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
|
|
max_nbytes, DMA_FROM_DEVICE);
|
|
|
|
if (unlikely(max_nbytes < nbytes)) {
|
|
ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
|
|
nbytes, max_nbytes);
|
|
dev_kfree_skb_any(skb);
|
|
continue;
|
|
}
|
|
|
|
skb_put(skb, nbytes);
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
|
|
ce_state->id, skb->len);
|
|
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
|
|
skb->data, skb->len);
|
|
|
|
cb->rx_completion(ar, skb, pipe_info->pipe_num);
|
|
}
|
|
|
|
ath10k_pci_rx_post_pipe(pipe_info);
|
|
}
|
|
|
|
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
|
|
struct ath10k_hif_sg_item *items, int n_items)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
|
|
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
|
|
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
|
|
unsigned int nentries_mask;
|
|
unsigned int sw_index;
|
|
unsigned int write_index;
|
|
int err, i = 0;
|
|
|
|
spin_lock_bh(&ar_pci->ce_lock);
|
|
|
|
nentries_mask = src_ring->nentries_mask;
|
|
sw_index = src_ring->sw_index;
|
|
write_index = src_ring->write_index;
|
|
|
|
if (unlikely(CE_RING_DELTA(nentries_mask,
|
|
write_index, sw_index - 1) < n_items)) {
|
|
err = -ENOBUFS;
|
|
goto err;
|
|
}
|
|
|
|
for (i = 0; i < n_items - 1; i++) {
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI,
|
|
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
|
|
i, items[i].paddr, items[i].len, n_items);
|
|
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
|
|
items[i].vaddr, items[i].len);
|
|
|
|
err = ath10k_ce_send_nolock(ce_pipe,
|
|
items[i].transfer_context,
|
|
items[i].paddr,
|
|
items[i].len,
|
|
items[i].transfer_id,
|
|
CE_SEND_FLAG_GATHER);
|
|
if (err)
|
|
goto err;
|
|
}
|
|
|
|
	/* `i` equals `n_items - 1` after the loop above */
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI,
|
|
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
|
|
i, items[i].paddr, items[i].len, n_items);
|
|
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
|
|
items[i].vaddr, items[i].len);
|
|
|
|
err = ath10k_ce_send_nolock(ce_pipe,
|
|
items[i].transfer_context,
|
|
items[i].paddr,
|
|
items[i].len,
|
|
items[i].transfer_id,
|
|
0);
|
|
if (err)
|
|
goto err;
|
|
|
|
spin_unlock_bh(&ar_pci->ce_lock);
|
|
return 0;
|
|
|
|
err:
|
|
for (; i > 0; i--)
|
|
__ath10k_ce_send_revert(ce_pipe);
|
|
|
|
spin_unlock_bh(&ar_pci->ce_lock);
|
|
return err;
|
|
}
|
|
|
|
static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
|
|
size_t buf_len)
|
|
{
|
|
return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
|
|
}
|
|
|
|
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
|
|
|
|
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
|
|
}
|
|
|
|
static void ath10k_pci_dump_registers(struct ath10k *ar,
|
|
struct ath10k_fw_crash_data *crash_data)
|
|
{
|
|
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
|
|
int i, ret;
|
|
|
|
lockdep_assert_held(&ar->data_lock);
|
|
|
|
	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
|
|
return;
|
|
}
|
|
|
|
BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
|
|
|
|
ath10k_err(ar, "firmware register dump:\n");
|
|
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
|
|
ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
|
|
i,
|
|
__le32_to_cpu(reg_dump_values[i]),
|
|
__le32_to_cpu(reg_dump_values[i + 1]),
|
|
__le32_to_cpu(reg_dump_values[i + 2]),
|
|
__le32_to_cpu(reg_dump_values[i + 3]));
|
|
|
|
if (!crash_data)
|
|
return;
|
|
|
|
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
|
|
crash_data->registers[i] = reg_dump_values[i];
|
|
}
|
|
|
|
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
|
|
{
|
|
struct ath10k_fw_crash_data *crash_data;
|
|
char uuid[50];
|
|
|
|
spin_lock_bh(&ar->data_lock);
|
|
|
|
ar->stats.fw_crash_counter++;
|
|
|
|
crash_data = ath10k_debug_get_new_fw_crash_data(ar);
|
|
|
|
if (crash_data)
|
|
scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
|
|
else
|
|
scnprintf(uuid, sizeof(uuid), "n/a");
|
|
|
|
ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
|
|
ath10k_print_driver_info(ar);
|
|
ath10k_pci_dump_registers(ar, crash_data);
|
|
|
|
spin_unlock_bh(&ar->data_lock);
|
|
|
|
queue_work(ar->workqueue, &ar->restart_work);
|
|
}
|
|
|
|
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
|
|
int force)
|
|
{
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
|
|
|
|
if (!force) {
|
|
int resources;
|
|
/*
|
|
* Decide whether to actually poll for completions, or just
|
|
* wait for a later chance.
|
|
* If there seem to be plenty of resources left, then just wait
|
|
* since checking involves reading a CE register, which is a
|
|
* relatively expensive operation.
|
|
*/
|
|
resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
|
|
|
|
/*
|
|
* If at least 50% of the total resources are still available,
|
|
* don't bother checking again yet.
|
|
*/
|
|
if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
|
|
return;
|
|
}
|
|
ath10k_ce_per_engine_service(ar, pipe);
|
|
}
|
|
|
|
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
|
|
struct ath10k_hif_cb *callbacks)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
|
|
|
|
memcpy(&ar_pci->msg_callbacks_current, callbacks,
|
|
sizeof(ar_pci->msg_callbacks_current));
|
|
}
|
|
|
|
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
int i;
|
|
|
|
tasklet_kill(&ar_pci->intr_tq);
|
|
tasklet_kill(&ar_pci->msi_fw_err);
|
|
|
|
for (i = 0; i < CE_COUNT; i++)
|
|
tasklet_kill(&ar_pci->pipe_info[i].intr);
|
|
|
|
del_timer_sync(&ar_pci->rx_post_retry);
|
|
}
|
|
|
|
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
|
|
u16 service_id, u8 *ul_pipe,
|
|
u8 *dl_pipe, int *ul_is_polled,
|
|
int *dl_is_polled)
|
|
{
|
|
const struct service_to_pipe *entry;
|
|
bool ul_set = false, dl_set = false;
|
|
int i;
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
|
|
|
|
/* polling for received messages not supported */
|
|
*dl_is_polled = 0;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
|
|
entry = &target_service_to_ce_map_wlan[i];
|
|
|
|
if (__le32_to_cpu(entry->service_id) != service_id)
|
|
continue;
|
|
|
|
switch (__le32_to_cpu(entry->pipedir)) {
|
|
case PIPEDIR_NONE:
|
|
break;
|
|
case PIPEDIR_IN:
|
|
WARN_ON(dl_set);
|
|
*dl_pipe = __le32_to_cpu(entry->pipenum);
|
|
dl_set = true;
|
|
break;
|
|
case PIPEDIR_OUT:
|
|
WARN_ON(ul_set);
|
|
*ul_pipe = __le32_to_cpu(entry->pipenum);
|
|
ul_set = true;
|
|
break;
|
|
case PIPEDIR_INOUT:
|
|
WARN_ON(dl_set);
|
|
WARN_ON(ul_set);
|
|
*dl_pipe = __le32_to_cpu(entry->pipenum);
|
|
*ul_pipe = __le32_to_cpu(entry->pipenum);
|
|
dl_set = true;
|
|
ul_set = true;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (WARN_ON(!ul_set || !dl_set))
|
|
return -ENOENT;
|
|
|
|
*ul_is_polled =
|
|
(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
|
|
u8 *ul_pipe, u8 *dl_pipe)
|
|
{
|
|
int ul_is_polled, dl_is_polled;
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
|
|
|
|
(void)ath10k_pci_hif_map_service_to_pipe(ar,
|
|
ATH10K_HTC_SVC_ID_RSVD_CTRL,
|
|
ul_pipe,
|
|
dl_pipe,
|
|
&ul_is_polled,
|
|
&dl_is_polled);
|
|
}
|
|
|
|
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
|
|
{
|
|
u32 val;
|
|
|
|
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
|
|
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
|
|
|
|
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
|
|
}
|
|
|
|
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
|
|
{
|
|
u32 val;
|
|
|
|
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
|
|
val |= CORE_CTRL_PCIE_REG_31_MASK;
|
|
|
|
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
|
|
}
|
|
|
|
static void ath10k_pci_irq_disable(struct ath10k *ar)
|
|
{
|
|
ath10k_ce_disable_interrupts(ar);
|
|
ath10k_pci_disable_and_clear_legacy_irq(ar);
|
|
ath10k_pci_irq_msi_fw_mask(ar);
|
|
}
|
|
|
|
static void ath10k_pci_irq_sync(struct ath10k *ar)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
int i;
|
|
|
|
for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
|
|
synchronize_irq(ar_pci->pdev->irq + i);
|
|
}
|
|
|
|
static void ath10k_pci_irq_enable(struct ath10k *ar)
|
|
{
|
|
ath10k_ce_enable_interrupts(ar);
|
|
ath10k_pci_enable_legacy_irq(ar);
|
|
ath10k_pci_irq_msi_fw_unmask(ar);
|
|
}
|
|
|
|
static int ath10k_pci_hif_start(struct ath10k *ar)
|
|
{
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
|
|
|
|
ath10k_pci_irq_enable(ar);
|
|
ath10k_pci_rx_post(ar);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
|
|
{
|
|
struct ath10k *ar;
|
|
struct ath10k_pci *ar_pci;
|
|
struct ath10k_ce_pipe *ce_hdl;
|
|
u32 buf_sz;
|
|
struct sk_buff *netbuf;
|
|
u32 ce_data;
|
|
|
|
buf_sz = pipe_info->buf_sz;
|
|
|
|
/* Unused Copy Engine */
|
|
if (buf_sz == 0)
|
|
return;
|
|
|
|
ar = pipe_info->hif_ce_state;
|
|
ar_pci = ath10k_pci_priv(ar);
|
|
ce_hdl = pipe_info->ce_hdl;
|
|
|
|
while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
|
|
&ce_data) == 0) {
|
|
dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
|
|
netbuf->len + skb_tailroom(netbuf),
|
|
DMA_FROM_DEVICE);
|
|
dev_kfree_skb_any(netbuf);
|
|
}
|
|
}
|
|
|
|
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
|
|
{
|
|
struct ath10k *ar;
|
|
struct ath10k_pci *ar_pci;
|
|
struct ath10k_ce_pipe *ce_hdl;
|
|
struct sk_buff *netbuf;
|
|
u32 ce_data;
|
|
unsigned int nbytes;
|
|
unsigned int id;
|
|
u32 buf_sz;
|
|
|
|
buf_sz = pipe_info->buf_sz;
|
|
|
|
/* Unused Copy Engine */
|
|
if (buf_sz == 0)
|
|
return;
|
|
|
|
ar = pipe_info->hif_ce_state;
|
|
ar_pci = ath10k_pci_priv(ar);
|
|
ce_hdl = pipe_info->ce_hdl;
|
|
|
|
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
|
|
&ce_data, &nbytes, &id) == 0) {
|
|
/* no need to call tx completion for NULL pointers */
|
|
if (!netbuf)
|
|
continue;
|
|
|
|
ar_pci->msg_callbacks_current.tx_completion(ar,
|
|
netbuf,
|
|
id);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Cleanup residual buffers for device shutdown:
|
|
* buffers that were enqueued for receive
|
|
* buffers that were to be sent
|
|
* Note: Buffers that had completed but which were
|
|
* not yet processed are on a completion queue. They
|
|
* are handled when the completion thread shuts down.
|
|
*/
|
|
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
int pipe_num;
|
|
|
|
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
|
|
struct ath10k_pci_pipe *pipe_info;
|
|
|
|
pipe_info = &ar_pci->pipe_info[pipe_num];
|
|
ath10k_pci_rx_pipe_cleanup(pipe_info);
|
|
ath10k_pci_tx_pipe_cleanup(pipe_info);
|
|
}
|
|
}
|
|
|
|
static void ath10k_pci_ce_deinit(struct ath10k *ar)
|
|
{
|
|
int i;
|
|
|
|
for (i = 0; i < CE_COUNT; i++)
|
|
ath10k_ce_deinit_pipe(ar, i);
|
|
}
|
|
|
|
static void ath10k_pci_flush(struct ath10k *ar)
|
|
{
|
|
ath10k_pci_kill_tasklet(ar);
|
|
ath10k_pci_buffer_cleanup(ar);
|
|
}
|
|
|
|
static void ath10k_pci_hif_stop(struct ath10k *ar)
|
|
{
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
|
|
|
|
/* Most likely the device has HTT Rx ring configured. The only way to
|
|
	 * prevent the device from accessing (and possibly corrupting) host
|
|
* memory is to reset the chip now.
|
|
*
|
|
* There's also no known way of masking MSI interrupts on the device.
|
|
* For ranged MSI the CE-related interrupts can be masked. However
|
|
* regardless how many MSI interrupts are assigned the first one
|
|
* is always used for firmware indications (crashes) and cannot be
|
|
* masked. To prevent the device from asserting the interrupt reset it
|
|
* before proceeding with cleanup.
|
|
*/
|
|
ath10k_pci_warm_reset(ar);
|
|
|
|
ath10k_pci_irq_disable(ar);
|
|
ath10k_pci_irq_sync(ar);
|
|
ath10k_pci_flush(ar);
|
|
}
|
|
|
|
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
|
|
void *req, u32 req_len,
|
|
void *resp, u32 *resp_len)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
|
|
struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
|
|
struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
|
|
struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
|
|
dma_addr_t req_paddr = 0;
|
|
dma_addr_t resp_paddr = 0;
|
|
struct bmi_xfer xfer = {};
|
|
void *treq, *tresp = NULL;
|
|
int ret = 0;
|
|
|
|
might_sleep();
|
|
|
|
if (resp && !resp_len)
|
|
return -EINVAL;
|
|
|
|
if (resp && resp_len && *resp_len == 0)
|
|
return -EINVAL;
|
|
|
|
treq = kmemdup(req, req_len, GFP_KERNEL);
|
|
if (!treq)
|
|
return -ENOMEM;
|
|
|
|
req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
|
|
ret = dma_mapping_error(ar->dev, req_paddr);
|
|
if (ret)
|
|
goto err_dma;
|
|
|
|
if (resp && resp_len) {
|
|
tresp = kzalloc(*resp_len, GFP_KERNEL);
|
|
if (!tresp) {
|
|
ret = -ENOMEM;
|
|
goto err_req;
|
|
}
|
|
|
|
resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
|
|
DMA_FROM_DEVICE);
|
|
ret = dma_mapping_error(ar->dev, resp_paddr);
|
|
if (ret)
|
|
goto err_req;
|
|
|
|
xfer.wait_for_resp = true;
|
|
xfer.resp_len = 0;
|
|
|
|
ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
|
|
}
|
|
|
|
ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
|
|
if (ret)
|
|
goto err_resp;
|
|
|
|
ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
|
|
if (ret) {
|
|
u32 unused_buffer;
|
|
unsigned int unused_nbytes;
|
|
unsigned int unused_id;
|
|
|
|
ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
|
|
&unused_nbytes, &unused_id);
|
|
} else {
|
|
/* non-zero means we did not time out */
|
|
ret = 0;
|
|
}
|
|
|
|
err_resp:
|
|
if (resp) {
|
|
u32 unused_buffer;
|
|
|
|
ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
|
|
dma_unmap_single(ar->dev, resp_paddr,
|
|
*resp_len, DMA_FROM_DEVICE);
|
|
}
|
|
err_req:
|
|
dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
|
|
|
|
if (ret == 0 && resp_len) {
|
|
*resp_len = min(*resp_len, xfer.resp_len);
|
|
memcpy(resp, tresp, xfer.resp_len);
|
|
}
|
|
err_dma:
|
|
kfree(treq);
|
|
kfree(tresp);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
|
|
{
|
|
struct bmi_xfer *xfer;
|
|
u32 ce_data;
|
|
unsigned int nbytes;
|
|
unsigned int transfer_id;
|
|
|
|
if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
|
|
&nbytes, &transfer_id))
|
|
return;
|
|
|
|
xfer->tx_done = true;
|
|
}
|
|
|
|
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
|
|
{
|
|
struct ath10k *ar = ce_state->ar;
|
|
struct bmi_xfer *xfer;
|
|
u32 ce_data;
|
|
unsigned int nbytes;
|
|
unsigned int transfer_id;
|
|
unsigned int flags;
|
|
|
|
if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
|
|
&nbytes, &transfer_id, &flags))
|
|
return;
|
|
|
|
if (!xfer->wait_for_resp) {
|
|
ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
|
|
return;
|
|
}
|
|
|
|
xfer->resp_len = nbytes;
|
|
xfer->rx_done = true;
|
|
}
|
|
|
|
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
|
|
struct ath10k_ce_pipe *rx_pipe,
|
|
struct bmi_xfer *xfer)
|
|
{
|
|
unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
|
|
|
|
while (time_before_eq(jiffies, timeout)) {
|
|
ath10k_pci_bmi_send_done(tx_pipe);
|
|
ath10k_pci_bmi_recv_data(rx_pipe);
|
|
|
|
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
|
|
return 0;
|
|
|
|
schedule();
|
|
}
|
|
|
|
return -ETIMEDOUT;
|
|
}
|
|
|
|
/*
|
|
* Send an interrupt to the device to wake up the Target CPU
|
|
* so it has an opportunity to notice any changed state.
|
|
*/
|
|
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
|
|
{
|
|
u32 addr, val;
|
|
|
|
addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
|
|
val = ath10k_pci_read32(ar, addr);
|
|
val |= CORE_CTRL_CPU_INTR_MASK;
|
|
ath10k_pci_write32(ar, addr, val);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int ath10k_pci_init_config(struct ath10k *ar)
|
|
{
|
|
u32 interconnect_targ_addr;
|
|
u32 pcie_state_targ_addr = 0;
|
|
u32 pipe_cfg_targ_addr = 0;
|
|
u32 svc_to_pipe_map = 0;
|
|
u32 pcie_config_flags = 0;
|
|
u32 ealloc_value;
|
|
u32 ealloc_targ_addr;
|
|
u32 flag2_value;
|
|
u32 flag2_targ_addr;
|
|
int ret = 0;
|
|
|
|
/* Download to Target the CE Config and the service-to-CE map */
|
|
interconnect_targ_addr =
|
|
host_interest_item_address(HI_ITEM(hi_interconnect_state));
|
|
|
|
/* Supply Target-side CE configuration */
|
|
ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
|
|
&pcie_state_targ_addr);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
if (pcie_state_targ_addr == 0) {
|
|
ret = -EIO;
|
|
ath10k_err(ar, "Invalid pcie state addr\n");
|
|
return ret;
|
|
}
|
|
|
|
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
|
|
offsetof(struct pcie_state,
|
|
pipe_cfg_addr)),
|
|
&pipe_cfg_targ_addr);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
if (pipe_cfg_targ_addr == 0) {
|
|
ret = -EIO;
|
|
ath10k_err(ar, "Invalid pipe cfg addr\n");
|
|
return ret;
|
|
}
|
|
|
|
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
|
|
target_ce_config_wlan,
|
|
sizeof(target_ce_config_wlan));
|
|
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
|
|
offsetof(struct pcie_state,
|
|
svc_to_pipe_map)),
|
|
&svc_to_pipe_map);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
if (svc_to_pipe_map == 0) {
|
|
ret = -EIO;
|
|
ath10k_err(ar, "Invalid svc_to_pipe map\n");
|
|
return ret;
|
|
}
|
|
|
|
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
|
|
target_service_to_ce_map_wlan,
|
|
sizeof(target_service_to_ce_map_wlan));
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
|
|
offsetof(struct pcie_state,
|
|
config_flags)),
|
|
&pcie_config_flags);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
|
|
|
|
ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
|
|
offsetof(struct pcie_state,
|
|
config_flags)),
|
|
pcie_config_flags);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
/* configure early allocation */
|
|
ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
|
|
|
|
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
/* first bank is switched to IRAM */
|
|
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
|
|
HI_EARLY_ALLOC_MAGIC_MASK);
|
|
ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
|
|
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
|
|
|
|
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
/* Tell Target to proceed with initialization */
|
|
flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
|
|
|
|
ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to get option val: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
flag2_value |= HI_OPTION_EARLY_CFG_DONE;
|
|
|
|
ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
|
|
if (ret != 0) {
|
|
ath10k_err(ar, "Failed to set option val: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int ath10k_pci_alloc_ce(struct ath10k *ar)
|
|
{
|
|
int i, ret;
|
|
|
|
for (i = 0; i < CE_COUNT; i++) {
|
|
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
|
|
i, ret);
|
|
return ret;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void ath10k_pci_free_ce(struct ath10k *ar)
|
|
{
|
|
int i;
|
|
|
|
for (i = 0; i < CE_COUNT; i++)
|
|
ath10k_ce_free_pipe(ar, i);
|
|
}
|
|
|
|
static int ath10k_pci_ce_init(struct ath10k *ar)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct ath10k_pci_pipe *pipe_info;
|
|
const struct ce_attr *attr;
|
|
int pipe_num, ret;
|
|
|
|
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
|
|
pipe_info = &ar_pci->pipe_info[pipe_num];
|
|
pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
|
|
pipe_info->pipe_num = pipe_num;
|
|
pipe_info->hif_ce_state = ar;
|
|
attr = &host_ce_config_wlan[pipe_num];
|
|
|
|
ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
|
|
ath10k_pci_ce_send_done,
|
|
ath10k_pci_ce_recv_data);
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
|
|
pipe_num, ret);
|
|
return ret;
|
|
}
|
|
|
|
if (pipe_num == CE_COUNT - 1) {
|
|
/*
|
|
* Reserve the ultimate CE for
|
|
* diagnostic Window support
|
|
*/
|
|
ar_pci->ce_diag = pipe_info->ce_hdl;
|
|
continue;
|
|
}
|
|
|
|
pipe_info->buf_sz = (size_t)(attr->src_sz_max);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
|
|
{
|
|
return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
|
|
FW_IND_EVENT_PENDING;
|
|
}
|
|
|
|
static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
|
|
{
|
|
u32 val;
|
|
|
|
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
|
|
val &= ~FW_IND_EVENT_PENDING;
|
|
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
|
|
}
|
|
|
|
/* this function effectively clears target memory controller assert line */
|
|
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
|
|
{
|
|
u32 val;
|
|
|
|
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
|
|
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
|
|
val | SOC_RESET_CONTROL_SI0_RST_MASK);
|
|
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
|
|
|
|
msleep(10);
|
|
|
|
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
|
|
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
|
|
val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
|
|
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
|
|
|
|
msleep(10);
|
|
}
|
|
|
|
static int ath10k_pci_warm_reset(struct ath10k *ar)
|
|
{
|
|
u32 val;
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
|
|
|
|
spin_lock_bh(&ar->data_lock);
|
|
|
|
ar->stats.fw_warm_reset_counter++;
|
|
|
|
spin_unlock_bh(&ar->data_lock);
|
|
|
|
/* debug */
|
|
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
|
|
PCIE_INTR_CAUSE_ADDRESS);
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
|
|
val);
|
|
|
|
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
|
|
CPU_INTR_ADDRESS);
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
|
|
val);
|
|
|
|
/* disable pending irqs */
|
|
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
|
|
PCIE_INTR_ENABLE_ADDRESS, 0);
|
|
|
|
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
|
|
PCIE_INTR_CLR_ADDRESS, ~0);
|
|
|
|
msleep(100);
|
|
|
|
/* clear fw indicator */
|
|
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
|
|
|
|
/* clear target LF timer interrupts */
|
|
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
|
|
SOC_LF_TIMER_CONTROL0_ADDRESS);
|
|
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
|
|
SOC_LF_TIMER_CONTROL0_ADDRESS,
|
|
val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
|
|
|
|
/* reset CE */
|
|
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
|
|
SOC_RESET_CONTROL_ADDRESS);
|
|
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
|
|
val | SOC_RESET_CONTROL_CE_RST_MASK);
|
|
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
|
|
SOC_RESET_CONTROL_ADDRESS);
|
|
msleep(10);
|
|
|
|
/* unreset CE */
|
|
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
|
|
val & ~SOC_RESET_CONTROL_CE_RST_MASK);
|
|
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
|
|
SOC_RESET_CONTROL_ADDRESS);
|
|
msleep(10);
|
|
|
|
ath10k_pci_warm_reset_si0(ar);
|
|
|
|
/* debug */
|
|
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
|
|
PCIE_INTR_CAUSE_ADDRESS);
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
|
|
val);
|
|
|
|
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
|
|
CPU_INTR_ADDRESS);
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
|
|
val);
|
|
|
|
/* CPU warm reset */
|
|
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
|
|
SOC_RESET_CONTROL_ADDRESS);
|
|
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
|
|
val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
|
|
|
|
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
|
|
SOC_RESET_CONTROL_ADDRESS);
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
|
|
val);
|
|
|
|
msleep(100);
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
|
|
{
|
|
int ret;
|
|
|
|
/*
|
|
* Bring the target up cleanly.
|
|
*
|
|
* The target may be in an undefined state with an AUX-powered Target
|
|
* and a Host in WoW mode. If the Host crashes, loses power, or is
|
|
* restarted (without unloading the driver) then the Target is left
|
|
* (aux) powered and running. On a subsequent driver load, the Target
|
|
* is in an unexpected state. We try to catch that here in order to
|
|
* reset the Target and retry the probe.
|
|
*/
|
|
if (cold_reset)
|
|
ret = ath10k_pci_cold_reset(ar);
|
|
else
|
|
ret = ath10k_pci_warm_reset(ar);
|
|
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to reset target: %d\n", ret);
|
|
goto err;
|
|
}
|
|
|
|
ret = ath10k_pci_ce_init(ar);
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
|
|
goto err;
|
|
}
|
|
|
|
ret = ath10k_pci_wait_for_target_init(ar);
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
|
|
goto err_ce;
|
|
}
|
|
|
|
ret = ath10k_pci_init_config(ar);
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to setup init config: %d\n", ret);
|
|
goto err_ce;
|
|
}
|
|
|
|
ret = ath10k_pci_wake_target_cpu(ar);
|
|
if (ret) {
|
|
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
|
|
goto err_ce;
|
|
}
|
|
|
|
return 0;
|
|
|
|
err_ce:
|
|
ath10k_pci_ce_deinit(ar);
|
|
ath10k_pci_warm_reset(ar);
|
|
err:
|
|
return ret;
|
|
}
|
|
|
|
static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
|
|
{
|
|
int i, ret;
|
|
|
|
/*
|
|
	 * Sometimes warm reset only succeeds after a few retries.
|
|
*
|
|
* FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
|
|
* at first try.
|
|
*/
|
|
for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
|
|
ret = __ath10k_pci_hif_power_up(ar, false);
|
|
if (ret == 0)
|
|
break;
|
|
|
|
ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
|
|
i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int ath10k_pci_hif_power_up(struct ath10k *ar)
|
|
{
|
|
int ret;
|
|
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
|
|
|
|
/*
|
|
* Hardware CUS232 version 2 has some issues with cold reset and the
|
|
* preferred (and safer) way to perform a device reset is through a
|
|
* warm reset.
|
|
*
|
|
	 * Warm reset doesn't always work though, so falling back to cold reset
	 * may be necessary.
|
|
*/
|
|
ret = ath10k_pci_hif_power_up_warm(ar);
|
|
if (ret) {
|
|
ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
|
|
ret);
|
|
|
|
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
|
|
return ret;
|
|
|
|
ath10k_warn(ar, "trying cold reset\n");
|
|
|
|
ret = __ath10k_pci_hif_power_up(ar, true);
|
|
if (ret) {
|
|
ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
|
|
ret);
|
|
return ret;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void ath10k_pci_hif_power_down(struct ath10k *ar)
|
|
{
|
|
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
|
|
|
|
ath10k_pci_warm_reset(ar);
|
|
}
|
|
|
|
#ifdef CONFIG_PM
|
|
|
|
#define ATH10K_PCI_PM_CONTROL 0x44
|
|
|
|
static int ath10k_pci_hif_suspend(struct ath10k *ar)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct pci_dev *pdev = ar_pci->pdev;
|
|
u32 val;
|
|
|
|
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
|
|
|
if ((val & 0x000000ff) != 0x3) {
|
|
pci_save_state(pdev);
|
|
pci_disable_device(pdev);
|
|
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
|
(val & 0xffffff00) | 0x03);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int ath10k_pci_hif_resume(struct ath10k *ar)
|
|
{
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
struct pci_dev *pdev = ar_pci->pdev;
|
|
u32 val;
|
|
|
|
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
|
|
|
if ((val & 0x000000ff) != 0) {
|
|
pci_restore_state(pdev);
|
|
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
|
val & 0xffffff00);
|
|
/*
|
|
* Suspend/Resume resets the PCI configuration space,
|
|
* so we have to re-disable the RETRY_TIMEOUT register (0x41)
|
|
* to keep PCI Tx retries from interfering with C3 CPU state
|
|
*/
|
|
pci_read_config_dword(pdev, 0x40, &val);
|
|
|
|
if ((val & 0x0000ff00) != 0)
|
|
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
#endif
|
|
|
|
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
|
|
.tx_sg = ath10k_pci_hif_tx_sg,
|
|
.diag_read = ath10k_pci_hif_diag_read,
|
|
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
|
|
.start = ath10k_pci_hif_start,
|
|
.stop = ath10k_pci_hif_stop,
|
|
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
|
|
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
|
|
.send_complete_check = ath10k_pci_hif_send_complete_check,
|
|
.set_callbacks = ath10k_pci_hif_set_callbacks,
|
|
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
|
|
.power_up = ath10k_pci_hif_power_up,
|
|
.power_down = ath10k_pci_hif_power_down,
|
|
#ifdef CONFIG_PM
|
|
.suspend = ath10k_pci_hif_suspend,
|
|
.resume = ath10k_pci_hif_resume,
|
|
#endif
|
|
};
|
|
|
|
static void ath10k_pci_ce_tasklet(unsigned long ptr)
|
|
{
|
|
struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
|
|
struct ath10k_pci *ar_pci = pipe->ar_pci;
|
|
|
|
ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
|
|
}
|
|
|
|
static void ath10k_msi_err_tasklet(unsigned long data)
|
|
{
|
|
struct ath10k *ar = (struct ath10k *)data;
|
|
|
|
if (!ath10k_pci_has_fw_crashed(ar)) {
|
|
ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
|
|
return;
|
|
}
|
|
|
|
ath10k_pci_fw_crashed_clear(ar);
|
|
ath10k_pci_fw_crashed_dump(ar);
|
|
}
|
|
|
|
/*
|
|
* Handler for a per-engine interrupt on a PARTICULAR CE.
|
|
* This is used in cases where each CE has a private MSI interrupt.
|
|
*/
|
|
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
|
|
{
|
|
struct ath10k *ar = arg;
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
|
|
|
|
if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
|
|
ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
|
|
ce_id);
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
/*
|
|
* NOTE: We are able to derive ce_id from irq because we
|
|
* use a one-to-one mapping for CE's 0..5.
|
|
* CE's 6 & 7 do not use interrupts at all.
|
|
*
|
|
* This mapping must be kept in sync with the mapping
|
|
* used by firmware.
|
|
*/
|
|
tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
|
|
{
|
|
struct ath10k *ar = arg;
|
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
|
|
tasklet_schedule(&ar_pci->msi_fw_err);
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

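/* Main interrupt tasklet: handle a pending firmware crash if there is one,
 * otherwise service all copy engines and re-arm legacy interrupts. */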
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}

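/* Multi-MSI setup: vector MSI_ASSIGN_FW carries firmware indications,
 * vectors MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX map one-to-one onto
 * copy engines. On failure all previously requested vectors are freed. */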
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

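/* Single-MSI setup: every interrupt source funnels through the top-level
 * handler. */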
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

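/* Request irqs according to the mode negotiated in ath10k_pci_init_irq():
 * 0 vectors means legacy INTx, 1 means a single MSI, MSI_NUM_REQUEST means
 * a block of per-CE vectors. */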
static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn(ar, "unknown irq configuration upon request\n");
	return -EINVAL;
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}

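/* Initialise the deferred-work contexts used by the interrupt handlers:
 * the main tasklet, the firmware-error tasklet and one tasklet per CE. */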
static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}

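/* Pick an interrupt mode, honouring the irq_mode module parameter: try a
 * full block of MSI vectors first, then a single MSI, and finally fall
 * back to legacy interrupts. */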
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in the
	 * synchronization check below. */
	ar_pci->num_msi_intrs = 0;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}

static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

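/* Undo ath10k_pci_init_irq() for whichever interrupt mode was selected. */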
static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		ath10k_pci_deinit_irq_legacy(ar);
		return 0;
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn(ar, "unknown irq configuration upon deinit\n");
	return -EINVAL;
}

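/* Poll the firmware indicator register until the target reports
 * FW_IND_INITIALIZED, a crash (FW_IND_EVENT_PENDING) or the
 * ATH10K_PCI_TARGET_WAIT timeout expires. */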
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}

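/* Full chip reset via SOC_GLOBAL_RESET: assert reset, wait for RTC_STATE
 * to report cold reset, then de-assert and wait for it to clear. */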
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	int i;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
					    RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}

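/* Enable and map the PCI device: request BAR_NUM, force 32-bit DMA,
 * disable ASPM as a workaround and iomap the register space. */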
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 lcr_val;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Workaround: Disable ASPM */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}

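/* Reverse of ath10k_pci_claim(). */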
static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

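/* Driver probe: allocate the core, claim the PCI device, read the chip id,
 * set up copy engines and interrupts, then register with the ath10k core. */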
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 chip_id;

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
				ATH10K_BUS_PCI,
				&ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;

	spin_lock_init(&ar_pci->ce_lock);
	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up: %d\n", ret);
		goto err_release;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		/* don't report success to the PCI core on this error path */
		ret = -ENODEV;
		goto err_sleep;
	}

	ret = ath10k_pci_alloc_ce(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_free_ce;
	}

	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_free_ce:
	ath10k_pci_free_ce(ar);

err_sleep:
	ath10k_pci_sleep(ar);

err_release:
	ath10k_pci_release(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

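/* Driver remove: tear down in the reverse order of probe. */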
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	if (!ar)
		return;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_ce(ar);
	ath10k_pci_sleep(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);