Merge branch 'remotes/lorenzo/pci/endpoint'
- Use notification chain instead of EPF linkup ops for EPC events (Kishon Vijay Abraham I)
- Protect concurrent allocation in endpoint outbound address region (Kishon Vijay Abraham I)
- Protect concurrent access to pci_epf_ops (Kishon Vijay Abraham I)
- Assign function number for each PF in endpoint core (Kishon Vijay Abraham I)
- Refactor endpoint mode core initialization (Vidya Sagar)
- Add API to notify when core initialization completes (Vidya Sagar)
- Add test framework support to defer core initialization (Vidya Sagar)
- Update Tegra SoC ABI header to support uninitialization of UPHY PLL when in endpoint mode without reference clock (Vidya Sagar)
- Add DT and driver support for Tegra194 PCIe endpoint nodes (Vidya Sagar)
- Add endpoint test support for DMA data transfer (Kishon Vijay Abraham I)
- Print throughput information in endpoint test (Kishon Vijay Abraham I)
- Use streaming DMA APIs for endpoint test buffer allocation (Kishon Vijay Abraham I)
- Add endpoint test command line option for DMA (Kishon Vijay Abraham I)
- When stopping a controller via configfs, clear endpoint "start" entry to prevent WARN_ON (Kunihiko Hayashi)
- Update endpoint ->set_msix() to pay attention to MSI-X BAR Indicator and offset when finding MSI-X tables (Kishon Vijay Abraham I)
- MSI-X tables are in local memory, not in the PCI address space; update pcie-designware-ep to account for this (Kishon Vijay Abraham I)
- Allow AM654 PCIe Endpoint to raise MSI-X interrupts (Kishon Vijay Abraham I)
- Avoid using module parameter to determine irqtype for endpoint test (Kishon Vijay Abraham I)
- Add ioctl to clear IRQ for endpoint test (Kishon Vijay Abraham I)
- Add endpoint test 'e' option to clear IRQ (Kishon Vijay Abraham I)
- Bump limit on number of endpoint test devices from 10 to 10,000 (Kishon Vijay Abraham I)
- Use full pci-endpoint-test name in request_irq() for easier profiling (Kishon Vijay Abraham I)
- Reduce log level of -EPROBE_DEFER error messages to debug (Thierry Reding)

* remotes/lorenzo/pci/endpoint:
  misc: pci_endpoint_test: remove duplicate macro PCI_ENDPOINT_TEST_STATUS
  PCI: tegra: Print -EPROBE_DEFER error message at debug level
  misc: pci_endpoint_test: Use full pci-endpoint-test name in request_irq()
  misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
  tools: PCI: Add 'e' to clear IRQ
  misc: pci_endpoint_test: Add ioctl to clear IRQ
  misc: pci_endpoint_test: Avoid using module parameter to determine irqtype
  PCI: keystone: Allow AM654 PCIe Endpoint to raise MSI-X interrupt
  PCI: dwc: Fix dw_pcie_ep_raise_msix_irq() to get correct MSI-X table address
  PCI: endpoint: Fix ->set_msix() to take BIR and offset as arguments
  misc: pci_endpoint_test: Add support to get DMA option from userspace
  tools: PCI: Add 'd' command line option to support DMA
  misc: pci_endpoint_test: Use streaming DMA APIs for buffer allocation
  PCI: endpoint: functions/pci-epf-test: Print throughput information
  PCI: endpoint: functions/pci-epf-test: Add DMA support to transfer data
  PCI: endpoint: Fix clearing start entry in configfs
  PCI: tegra: Add support for PCIe endpoint mode in Tegra194
  dt-bindings: PCI: tegra: Add DT support for PCIe EP nodes in Tegra194
  soc/tegra: bpmp: Update ABI header
  PCI: pci-epf-test: Add support to defer core initialization
  PCI: dwc: Add API to notify core initialization completion
  PCI: endpoint: Add notification for core init completion
  PCI: dwc: Refactor core initialization code for EP mode
  PCI: endpoint: Add core init notifying feature
  PCI: endpoint: Assign function number for each PF in EPC core
  PCI: endpoint: Protect concurrent access to pci_epf_ops with mutex
  PCI: endpoint: Fix for concurrent memory allocation in OB address region
  PCI: endpoint: Replace spinlock with mutex
  PCI: endpoint: Use notification chain mechanism to notify EPC events to EPF
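To see how the endpoint-test additions in this series fit together from userspace, here is a minimal sketch (not part of the merge; the device path and transfer size are arbitrary) that requests a DMA-backed transfer and then releases the IRQ vectors with the new ioctl:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/pcitest.h>

	int main(void)
	{
		/* pci_endpoint_test_xfer_param carries size + flags to the driver */
		struct pci_endpoint_test_xfer_param param = {
			.size  = 1024 * 1024,
			.flags = PCITEST_FLAGS_USE_DMA,	/* request the new DMA path */
		};
		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		if (ioctl(fd, PCITEST_WRITE, &param) != 1)	/* 1 == test passed */
			fprintf(stderr, "write test failed\n");

		if (ioctl(fd, PCITEST_CLEAR_IRQ) != 1)		/* new in this series */
			fprintf(stderr, "clear IRQ failed\n");

		close(fd);
		return 0;
	}

The stock pcitest tool drives the same paths with its new 'd' (use DMA) and 'e' (clear IRQ) command-line options.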
commit b16f2ab280
Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt:

@@ -1,11 +1,11 @@
NVIDIA Tegra PCIe controller (Synopsys DesignWare Core based)

This PCIe host controller is based on the Synopsys DesignWare PCIe IP
This PCIe controller is based on the Synopsys DesignWare PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.
Some of the controller instances are dual mode wherein they can work either
in root port mode or endpoint mode but one at a time.

Required properties:
- compatible: For Tegra19x, must contain "nvidia,tegra194-pcie".
- device_type: Must be "pci"
- power-domains: A phandle to the node that controls power to the respective
  PCIe controller and a specifier name for the PCIe controller. Following are
  the specifiers for the different PCIe controllers
@@ -32,6 +32,32 @@ Required properties:
  entry for each entry in the interrupt-names property.
- interrupt-names: Must include the following entries:
  "intr": The Tegra interrupt that is asserted for controller interrupts
- clocks: Must contain an entry for each entry in clock-names.
  See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
  - core
- resets: Must contain an entry for each entry in reset-names.
  See ../reset/reset.txt for details.
- reset-names: Must include the following entries:
  - apb
  - core
- phys: Must contain a phandle to P2U PHY for each entry in phy-names.
- phy-names: Must include an entry for each active lane.
  "p2u-N": where N ranges from 0 to one less than the total number of lanes
- nvidia,bpmp: Must contain a pair of phandle to BPMP controller node followed
  by controller-id. Following are the controller ids for each controller.
    0: C0
    1: C1
    2: C2
    3: C3
    4: C4
    5: C5
- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals

RC mode:
- compatible: Tegra19x must contain "nvidia,tegra194-pcie"
- device_type: Must be "pci" for RC mode
- interrupt-names: Must include the following entries:
  "msi": The Tegra interrupt that is asserted when an MSI is received
- bus-range: Range of bus numbers associated with this controller
- #address-cells: Address representation for root ports (must be 3)
@@ -60,27 +86,15 @@ Required properties:
- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
  Please refer to the standard PCI bus binding document for a more detailed
  explanation.
- clocks: Must contain an entry for each entry in clock-names.
  See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
  - core
- resets: Must contain an entry for each entry in reset-names.
  See ../reset/reset.txt for details.
- reset-names: Must include the following entries:
  - apb
  - core
- phys: Must contain a phandle to P2U PHY for each entry in phy-names.
- phy-names: Must include an entry for each active lane.
  "p2u-N": where N ranges from 0 to one less than the total number of lanes
- nvidia,bpmp: Must contain a pair of phandle to BPMP controller node followed
  by controller-id. Following are the controller ids for each controller.
    0: C0
    1: C1
    2: C2
    3: C3
    4: C4
    5: C5
- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals

EP mode:
In Tegra194, only controllers C0, C4 & C5 support EP mode.
- compatible: Tegra19x must contain "nvidia,tegra194-pcie-ep"
- reg-names: Must include the following entries:
  "addr_space": Used to map remote RC address space
- reset-gpios: Must contain a phandle to a GPIO controller followed by
  GPIO that is being used as PERST input signal. Please refer to pci.txt
  document.

Optional properties:
- pinctrl-names: A list of pinctrl state names.
@@ -104,6 +118,8 @@ Optional properties:
  specified in microseconds
- nvidia,aspm-l0s-entrance-latency-us: ASPM L0s entrance latency to be
  specified in microseconds

RC mode:
- vpcie3v3-supply: A phandle to the regulator node that supplies 3.3V to the slot
  if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
  in p2972-0000 platform).
@@ -111,11 +127,18 @@ Optional properties:
  if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
  in p2972-0000 platform).

EP mode:
- nvidia,refclk-select-gpios: Must contain a phandle to a GPIO controller
  followed by GPIO that is being used to enable REFCLK to controller from host

NOTE:- On Tegra194's P2972-0000 platform, only C5 controller can be enabled to
operate in the endpoint mode because of the way the platform is designed.

Examples:
=========

Tegra194:
--------
Tegra194 RC mode:
-----------------

pcie@14180000 {
	compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
@@ -169,3 +192,53 @@ Tegra194:
	       <&p2u_hsio_5>;
	phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
};

Tegra194 EP mode:
-----------------

pcie_ep@141a0000 {
	compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
	reg = <0x00 0x141a0000 0x0 0x00020000   /* appl registers (128K)     */
	       0x00 0x3a040000 0x0 0x00040000   /* iATU_DMA reg space (256K) */
	       0x00 0x3a080000 0x0 0x00040000   /* DBI reg space (256K)      */
	       0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G)       */
	reg-names = "appl", "atu_dma", "dbi", "addr_space";

	num-lanes = <8>;
	num-ib-windows = <2>;
	num-ob-windows = <8>;

	pinctrl-names = "default";
	pinctrl-0 = <&clkreq_c5_bi_dir_state>;

	clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>;
	clock-names = "core";

	resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>,
		 <&bpmp TEGRA194_RESET_PEX1_CORE_5>;
	reset-names = "apb", "core";

	interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
	interrupt-names = "intr";

	nvidia,bpmp = <&bpmp 5>;

	nvidia,aspm-cmrt-us = <60>;
	nvidia,aspm-pwr-on-t-us = <20>;
	nvidia,aspm-l0s-entrance-latency-us = <3>;

	vddio-pex-ctl-supply = <&vdd_1v8ao>;

	reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>;

	nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5)
				      GPIO_ACTIVE_HIGH>;

	phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
	       <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
	       <&p2u_nvhs_6>, <&p2u_nvhs_7>;

	phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
		    "p2u-5", "p2u-6", "p2u-7";
};
drivers/misc/pci_endpoint_test.c:

@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

@@ -64,6 +65,9 @@
#define PCI_ENDPOINT_TEST_IRQ_TYPE	0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER	0x28

#define PCI_ENDPOINT_TEST_FLAGS		0x2c
#define FLAG_USE_DMA			BIT(0)

#define PCI_DEVICE_ID_TI_AM654		0xb00c

#define is_am654_pci_dev(pdev) \
@@ -98,11 +102,13 @@ struct pci_endpoint_test {
	struct completion irq_raised;
	int last_irq;
	int num_irqs;
	int irq_type;
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
@@ -157,6 +163,7 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
@@ -191,6 +198,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
@@ -218,7 +227,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, DRV_MODULE_NAME, test);
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}
@@ -315,11 +324,16 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
	return false;
}

static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
@@ -330,25 +344,46 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_src_phys_addr, GFP_KERNEL);
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
@@ -364,15 +399,21 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	get_random_bytes(src_addr, size);
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_dst_phys_addr, GFP_KERNEL);
	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_orig_src_addr;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
@@ -392,6 +433,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -399,24 +441,34 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_dst_addr,
			  orig_dst_phys_addr);
err_dst_phys_addr:
	kfree(orig_dst_addr);

err_orig_src_addr:
	dma_free_coherent(dev, size + alignment, orig_src_addr,
			  orig_src_phys_addr);
err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
@@ -426,24 +478,47 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
@@ -453,8 +528,6 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
		addr = orig_addr;
	}

	get_random_bytes(addr, size);

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);
@@ -466,6 +539,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -477,15 +551,24 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
@@ -494,24 +577,44 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
@@ -528,6 +631,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -535,15 +639,26 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
@@ -555,7 +670,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
		return false;
	}

	if (irq_type == req_irq_type)
	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
@@ -567,12 +682,10 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
	if (!pci_endpoint_test_request_irq(test))
		goto err;

	irq_type = req_irq_type;
	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	irq_type = IRQ_TYPE_UNDEFINED;
	return false;
}

@@ -616,6 +729,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
@@ -633,7 +749,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
{
	int err;
	int id;
	char name[20];
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
@@ -652,6 +768,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;
@@ -667,6 +784,12 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
@@ -684,9 +807,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
		goto err_disable_irq;

	if (!pci_endpoint_test_request_irq(test))
		goto err_disable_irq;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
@@ -716,12 +836,21 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test))
		goto err_kfree_test_name;

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_ida_remove;
		goto err_release_irq;
	}
	misc_device->fops = &pci_endpoint_test_fops,

@@ -736,6 +865,12 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

@@ -744,7 +879,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}
	pci_endpoint_test_release_irq(test);

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
@@ -770,6 +904,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
@@ -783,6 +918,12 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
@@ -790,8 +931,12 @@ static const struct pci_endpoint_test_data am654_data = {
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
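The recurring change across pci_endpoint_test_copy/write/read above is the switch from coherent allocations to streaming DMA, which is what lets the buffers be handed to pci-epf-test's DMA engine and exercises real cache maintenance. A condensed sketch of the new pattern (helper name hypothetical; alignment handling and device programming elided):

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static bool xfer_buffer_sketch(struct device *dev, size_t size)
	{
		dma_addr_t phys;
		void *buf;
		bool ok = false;

		buf = kzalloc(size, GFP_KERNEL);	/* plain kernel memory */
		if (!buf)
			return false;

		phys = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, phys))	/* mapping can fail */
			goto free_buf;

		/* ... program "phys" into the endpoint, wait for its IRQ ... */

		dma_unmap_single(dev, phys, size, DMA_FROM_DEVICE);
		ok = true;	/* only after unmap may the CPU safely read buf */
	free_buf:
		kfree(buf);
		return ok;
	}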
drivers/pci/controller/dwc/Kconfig:

@@ -248,14 +248,37 @@ config PCI_MESON
	  implement the driver.

config PCIE_TEGRA194
	tristate "NVIDIA Tegra194 (and later) PCIe controller"
	tristate

config PCIE_TEGRA194_HOST
	tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode"
	depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
	depends on PCI_MSI_IRQ_DOMAIN
	select PCIE_DW_HOST
	select PHY_TEGRA194_P2U
	select PCIE_TEGRA194
	help
	  Say Y here if you want support for DesignWare core based PCIe host
	  controller found in NVIDIA Tegra194 SoC.
	  Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
	  work in host mode. There are two instances of PCIe controllers in
	  Tegra194. This controller can work either as EP or RC. In order to
	  enable host-specific features PCIE_TEGRA194_HOST must be selected and
	  in order to enable device-specific features PCIE_TEGRA194_EP must be
	  selected. This uses the DesignWare core.

config PCIE_TEGRA194_EP
	tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode"
	depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
	depends on PCI_ENDPOINT
	select PCIE_DW_EP
	select PHY_TEGRA194_P2U
	select PCIE_TEGRA194
	help
	  Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
	  work in endpoint mode. There are two instances of PCIe controllers in
	  Tegra194. This controller can work either as EP or RC. In order to
	  enable host-specific features PCIE_TEGRA194_HOST must be selected and
	  in order to enable device-specific features PCIE_TEGRA194_EP must be
	  selected. This uses the DesignWare core.

config PCIE_UNIPHIER
	bool "Socionext UniPhier PCIe controllers"

drivers/pci/controller/dwc/pci-keystone.c:

@@ -959,6 +959,9 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
	case PCI_EPC_IRQ_MSI:
		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
		break;
	case PCI_EPC_IRQ_MSIX:
		dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
@@ -970,7 +973,7 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features ks_pcie_am654_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.msix_capable = true,
	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[2] = SZ_1M,

drivers/pci/controller/dwc/pcie-designware-ep.c:

@@ -18,6 +18,15 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)

	pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);

void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_init_notify(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);

static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
				   int flags)
@@ -125,6 +134,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,

	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
	clear_bit(atu_index, ep->ib_window_map);
	ep->epf_bar[bar] = NULL;
}

static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
@@ -158,6 +168,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
		dw_pcie_writel_dbi(pci, reg + 4, 0);
	}

	ep->epf_bar[bar] = epf_bar;
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
@@ -269,7 +280,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
	return val;
}

static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
			       enum pci_barno bir, u32 offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -278,12 +290,22 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
	if (!ep->msix_cap)
		return -EINVAL;

	dw_pcie_dbi_ro_wr_en(pci);

	reg = ep->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_readw_dbi(pci, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, reg, val);

	reg = ep->msix_cap + PCI_MSIX_TABLE;
	val = offset | bir;
	dw_pcie_writel_dbi(pci, reg, val);

	reg = ep->msix_cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	dw_pcie_writel_dbi(pci, reg, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
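The `offset | bir` values written above follow the MSI-X Table Offset/BIR (and PBA Offset/BIR) register layout from the PCIe spec: the low three bits carry the BAR Indicator and the upper bits the 8-byte-aligned offset, so the two fields can simply be ORed together. A reference sketch using the standard masks from include/uapi/linux/pci_regs.h:

	/* PCI_MSIX_TABLE_BIR is 0x00000007, PCI_MSIX_TABLE_OFFSET is 0xfffffff8 */
	static u32 msix_table_encode(u32 offset, u8 bir)
	{
		return (offset & PCI_MSIX_TABLE_OFFSET) | (bir & PCI_MSIX_TABLE_BIR);
	}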
@@ -409,55 +431,41 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
			      u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epf_msix_tbl *msix_tbl;
	struct pci_epc *epc = ep->epc;
	u16 tbl_offset, bir;
	u32 bar_addr_upper, bar_addr_lower;
	u32 msg_addr_upper, msg_addr_lower;
	struct pci_epf_bar *epf_bar;
	u32 reg, msg_data, vec_ctrl;
	u64 tbl_addr, msg_addr, reg_u64;
	void __iomem *msix_tbl;
	unsigned int aligned_offset;
	u32 tbl_offset;
	u64 msg_addr;
	int ret;
	u8 bir;

	reg = ep->msix_cap + PCI_MSIX_TABLE;
	tbl_offset = dw_pcie_readl_dbi(pci, reg);
	bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	reg = PCI_BASE_ADDRESS_0 + (4 * bir);
	bar_addr_upper = 0;
	bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
	reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
	if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
		bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
	epf_bar = ep->epf_bar[bir];
	msix_tbl = epf_bar->addr;
	msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset);

	tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
	tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
	tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;

	msix_tbl = ioremap(ep->phys_base + tbl_addr,
			   PCI_MSIX_ENTRY_SIZE);
	if (!msix_tbl)
		return -EINVAL;

	msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
	msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
	msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
	vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);

	iounmap(msix_tbl);
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
	vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;

	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
		return -EPERM;
	}

	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
	aligned_offset = msg_addr & (epc->mem->page_size - 1);
	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
				  epc->mem->page_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem);
	writel(msg_data, ep->msi_mem + aligned_offset);

	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
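Instead of computing the table's PCI bus address and ioremapping it, the rewritten function above indexes the locally mapped BAR through struct pci_epf_msix_tbl, which mirrors the 16-byte MSI-X table entry format; its layout, as added to include/linux/pci-epf.h by this series, is:

	struct pci_epf_msix_tbl {
		u64 msg_addr;		/* Message Address, lower and upper dwords */
		u32 msg_data;		/* Message Data written to raise the vector */
		u32 vector_ctrl;	/* Vector Control; bit 0 is the per-vector mask */
	};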
@@ -492,19 +500,54 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
	return 0;
}

int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	unsigned int offset;
	unsigned int nbars;
	u8 hdr_type;
	u32 reg;
	int i;

	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev,
			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);

	ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);

	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
	if (offset) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
			PCI_REBAR_CTRL_NBAR_SHIFT;

		dw_pcie_dbi_ro_wr_en(pci);
		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
		dw_pcie_dbi_ro_wr_dis(pci);
	}

	dw_pcie_setup(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);

int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	int i;
	int ret;
	u32 reg;
	void *addr;
	u8 hdr_type;
	unsigned int nbars;
	unsigned int offset;
	struct pci_epc *epc;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	const struct pci_epc_features *epc_features;

	if (!pci->dbi_base || !pci->dbi_base2) {
		dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
@@ -563,13 +606,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
	if (ep->ops->ep_init)
		ep->ops->ep_init(ep);

	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
	if (ret < 0)
		epc->max_functions = 1;
@@ -587,23 +623,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
		return -ENOMEM;
	}
	ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);

	ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);

	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
	if (offset) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
			PCI_REBAR_CTRL_NBAR_SHIFT;

		dw_pcie_dbi_ro_wr_en(pci);
		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
		dw_pcie_dbi_ro_wr_dis(pci);
	if (ep->ops->get_features) {
		epc_features = ep->ops->get_features(ep);
		if (epc_features->core_init_notifier)
			return 0;
	}

	dw_pcie_setup(pci);

	return 0;
	return dw_pcie_ep_init_complete(ep);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
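With the refactoring above, a controller whose epc_features set core_init_notifier gets an early return from dw_pcie_ep_init() and is expected to finish the job later, once the hardware is really accessible. A condensed sketch of the expected call order (function name hypothetical, modeled on the Tegra194 PERST# deassert path added later in this merge):

	/* Called by the controller driver once DBI access is safe, e.g. after
	 * the host has deasserted PERST# and the reference clock is stable. */
	static void my_controller_core_ready(struct dw_pcie_ep *ep)
	{
		struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

		if (dw_pcie_ep_init_complete(ep)) {	/* header, BARs, dw_pcie_setup() */
			dev_err(pci->dev, "EP core init failed\n");
			return;
		}

		dw_pcie_ep_init_notify(ep);	/* tell bound EPFs: CORE_INIT happened */
	}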
drivers/pci/controller/dwc/pcie-designware.h:

@@ -233,6 +233,7 @@ struct dw_pcie_ep {
	phys_addr_t msi_mem_phys;
	u8 msi_cap;	/* MSI capability offset */
	u8 msix_cap;	/* MSI-X capability offset */
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};

struct dw_pcie_ops {
@@ -411,6 +412,8 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -428,6 +431,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
	return 0;
}

static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
	return 0;
}

static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
{
}

static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
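On the function-driver side these events arrive over the notification chain that this series introduces in place of the old linkup op. A sketch of a consumer, modeled on pci-epf-test (the my_epf_* helpers are hypothetical):

	static int my_epf_notifier(struct notifier_block *nb, unsigned long val,
				   void *data)
	{
		struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

		switch (val) {
		case CORE_INIT:
			/* Controller core is up: write config header, set up BARs */
			if (my_epf_core_init(epf))
				return NOTIFY_BAD;
			break;
		case LINK_UP:
			/* Link training done: safe to start servicing host commands */
			my_epf_start(epf);
			break;
		default:
			return NOTIFY_BAD;
		}

		return NOTIFY_OK;
	}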
drivers/pci/controller/dwc/pcie-tegra194.c:

@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -53,6 +54,7 @@
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
@@ -60,19 +62,26 @@
#define APPL_INTR_STATUS_L0 0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)

#define APPL_INTR_EN_L1_0_0 0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)

#define APPL_INTR_STATUS_L1_0_0 0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)

#define APPL_INTR_STATUS_L1_1 0x2C
#define APPL_INTR_STATUS_L1_2 0x30
#define APPL_INTR_STATUS_L1_3 0x34
#define APPL_INTR_STATUS_L1_6 0x3C
#define APPL_INTR_STATUS_L1_7 0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)

#define APPL_INTR_EN_L1_8_0 0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
@@ -103,8 +112,12 @@
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)

#define APPL_MSI_CTRL_1 0xAC

#define APPL_MSI_CTRL_2 0xB0

#define APPL_LEGACY_INTX 0xB8

#define APPL_LTR_MSG_1 0xC4
#define LTR_MSG_REQ BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT 16
@@ -205,6 +218,13 @@
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2

#define MSIX_ADDR_MATCH_LOW_OFF 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL 0x948

#define CAP_SPCIE_CAP_OFF 0x154
@@ -223,6 +243,13 @@
#define GEN3_CORE_CLK_FREQ 250000000
#define GEN4_CORE_CLK_FREQ 500000000

#define LTR_MSG_TIMEOUT (100 * 1000)

#define PERST_DEBOUNCE_TIME (5 * 1000)

#define EP_STATE_DISABLED 0
#define EP_STATE_ENABLED 1

static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
@@ -260,6 +287,8 @@ struct tegra_pcie_dw {
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	enum dw_pcie_device_mode mode;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool link_state;
@@ -283,6 +312,16 @@ struct tegra_pcie_dw {
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
};

struct tegra_pcie_dw_of_data {
	enum dw_pcie_device_mode mode;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -339,8 +378,9 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
	}
}

static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, tmp;
@@ -411,11 +451,121 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
	return IRQ_HANDLED;
}

static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed;

	return tegra_pcie_rp_irq_handler(pcie);
	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;
	u32 val, tmp;

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);

		if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			tmp = appl_readl(pcie, APPL_LINK_STATUS);
			if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);

		if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 val);
		appl_writel(pcie, val, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -884,8 +1034,26 @@ static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
	pp->num_vectors = MAX_MSI_IRQS;
}

static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	enable_irq(pcie->pex_rst_irq);

	return 0;
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
@@ -986,6 +1154,40 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}

@@ -1017,6 +1219,34 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
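The endpoint support in the hunk below hinges on treating PERST# as an interrupt: start_link()/stop_link() above merely enable or disable pex_rst_irq, and the handler dispatches the assert/deassert events that drive the EP state machine. A condensed sketch of that dispatch (compare tegra_pcie_ep_pex_rst_irq in the full upstream patch; this is not the verbatim handler):

	static irqreturn_t pex_rst_irq_sketch(int irq, void *arg)
	{
		struct tegra_pcie_dw *pcie = arg;

		if (gpiod_get_value(pcie->pex_rst_gpiod))
			pex_ep_event_pex_rst_assert(pcie);	/* host asserted PERST# */
		else
			pex_ep_event_pex_rst_deassert(pcie);	/* host released PERST# */

		return IRQ_HANDLED;
	}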
|
||||
@ -1427,8 +1657,396 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (pcie->ep_state == EP_STATE_DISABLED)
|
||||
return;
|
||||
|
||||
/* Disable LTSSM */
|
||||
val = appl_readl(pcie, APPL_CTRL);
|
||||
val &= ~APPL_CTRL_LTSSM_EN;
|
||||
appl_writel(pcie, val, APPL_CTRL);
|
||||
|
||||
ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
|
||||
((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
|
||||
APPL_DEBUG_LTSSM_STATE_SHIFT) ==
|
||||
LTSSM_STATE_PRE_DETECT,
|
||||
1, LTSSM_TIMEOUT);
|
||||
if (ret)
|
||||
dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
|
||||
|
||||
reset_control_assert(pcie->core_rst);
|
||||
|
||||
tegra_pcie_disable_phy(pcie);
|
||||
|
||||
reset_control_assert(pcie->core_apb_rst);
|
||||
|
||||
clk_disable_unprepare(pcie->core_clk);
|
||||
|
||||
pm_runtime_put_sync(pcie->dev);
|
||||
|
||||
ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
|
||||
if (ret)
|
||||
dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
|
||||
|
||||
pcie->ep_state = EP_STATE_DISABLED;
|
||||
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
|
||||
}

static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
	if (ret) {
		dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
		goto fail_pll_init;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	/* Configure N_FTS & FTS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
	val &= ~(N_FTS_MASK << N_FTS_SHIFT);
	val |= N_FTS_VAL << N_FTS_SHIFT;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);

	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
	val &= ~FTS_MASK;
	val |= FTS_VAL;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Configure Max Speed from DT */
	if (pcie->max_speed && pcie->max_speed != -EINVAL) {
		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
					PCI_EXP_LNKCAP);
		val &= ~PCI_EXP_LNKCAP_SLS;
		val |= pcie->max_speed;
		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
				   val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	pm_runtime_put_sync(dev);
}

static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}

static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}

static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);

	return 0;
}

static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}
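
The MSI-X raise above leans on the address-match logic programmed during PERST# deassert: ep->msi_mem is a controller-local page whose physical address was written to the MSIX_ADDR_MATCH_LOW/HIGH_OFF registers, so a plain CPU store of the vector number to that page is trapped by the controller and forwarded to the host as the matching MSI-X message. A minimal sketch of the pairing, using the register names from the driver above (the helper name itself is hypothetical):

/* Hypothetical helper showing both halves of the address-match scheme. */
static void sketch_msix_address_match(struct dw_pcie *pci, struct dw_pcie_ep *ep)
{
	u32 val;

	/*
	 * Half one (done at PERST# deassert above): tell the controller
	 * which local address to watch.
	 */
	val = ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK;
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);

	/*
	 * Half two (done in tegra_pcie_ep_raise_msix_irq() above): any
	 * write of a vector number to the watched page becomes an MSI-X
	 * message toward the host.
	 */
	writel(5, ep->msi_mem);		/* raises MSI-X vector 5 */
}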

static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}

	return 0;
}

static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};

static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}
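
These feature flags are what an endpoint function driver consults through pci_epc_get_features() before it touches MSI/MSI-X or picks a BAR, as the pci-epf-test bind path later in this merge shows. A minimal consumer-side sketch (the function name is hypothetical):

/* Hypothetical EPF-side check against the controller's advertised features. */
static int sketch_check_features(struct pci_epf *epf)
{
	const struct pci_epc_features *features;

	features = pci_epc_get_features(epf->epc, epf->func_no);
	if (!features)
		return -EOPNOTSUPP;

	/*
	 * Tegra194 EP advertises neither MSI nor MSI-X here, so a function
	 * driver must be prepared to fall back to legacy INTA.
	 */
	if (!features->msi_capable && !features->msix_capable)
		dev_info(&epf->dev, "only legacy interrupts available\n");

	/* BAR_2..BAR_5 are reserved; BAR_0 is fixed 64-bit, 1 MiB. */
	if (features->reserved_bar & (1 << BAR_2))
		dev_dbg(&epf->dev, "BAR_2 is unusable on this EPC\n");

	return 0;
}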

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};

static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	struct resource *res;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);
	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PCIe EP work thread string\n");
		return -ENOMEM;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct device *dev = &pdev->dev;
	struct resource *atu_dma_res;
	struct tegra_pcie_dw *pcie;
@@ -1440,6 +2058,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
	int ret;
	u32 i;

	data = of_device_get_match_data(dev);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;
@@ -1449,19 +2069,37 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
	pci->ops = &tegra_dw_pcie_ops;
	pp = &pci->pp;
	pcie->dev = &pdev->dev;
	pcie->mode = (enum dw_pcie_device_mode)data->mode;

	ret = tegra_pcie_dw_parse_dt(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to parse device tree: %d\n", ret);
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to parse device tree: %d\n"),
			   ret);
		return ret;
	}

	ret = tegra_pcie_get_slot_regulators(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to get slot regulators: %d\n", ret);
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to get slot regulators: %d\n"),
			   ret);
		return ret;
	}

	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
	if (IS_ERR(pcie->pex_ctl_supply)) {
		ret = PTR_ERR(pcie->pex_ctl_supply);
@@ -1557,24 +2195,49 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
		return -ENODEV;
	}

	ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
			       IRQF_SHARED, "tegra-pcie-intr", pcie);
	if (ret) {
		dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
		return ret;
	}

	pcie->bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	platform_set_drvdata(pdev, pcie);

	ret = tegra_pcie_config_rp(pcie);
	if (ret && ret != -ENOMEDIUM)
		goto fail;
	else
		return 0;
	switch (pcie->mode) {
	case DW_PCIE_RC_TYPE:
		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
				       IRQF_SHARED, "tegra-pcie-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_rp(pcie);
		if (ret && ret != -ENOMEDIUM)
			goto fail;
		else
			return 0;
		break;

	case DW_PCIE_EP_TYPE:
		ret = devm_request_threaded_irq(dev, pp->irq,
						tegra_pcie_ep_hard_irq,
						tegra_pcie_ep_irq_thread,
						IRQF_SHARED | IRQF_ONESHOT,
						"tegra-pcie-ep-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;
		break;

	default:
		dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
	}

fail:
	tegra_bpmp_put(pcie->bpmp);
@@ -1593,6 +2256,8 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);

	return 0;
}
@@ -1697,9 +2362,22 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
	__deinit_controller(pcie);
}

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra_pcie_dw_ep_of_data,
	},
	{},
};

@@ -8,6 +8,7 @@

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define STATUS_SRC_ADDR_INVALID	BIT(7)
#define STATUS_DST_ADDR_INVALID	BIT(8)

#define FLAG_USE_DMA		BIT(0)

#define TIMER_RESOLUTION	1

static struct workqueue_struct *kpcitest_workqueue;
@@ -47,7 +50,11 @@ struct pci_epf_test {
	void *reg[PCI_STD_NUM_BARS];
	struct pci_epf *epf;
	enum pci_barno test_reg_bar;
	size_t msix_table_offset;
	struct delayed_work cmd_handler;
	struct dma_chan *dma_chan;
	struct completion transfer_complete;
	bool dma_supported;
	const struct pci_epc_features *epc_features;
};

@@ -61,6 +68,7 @@ struct pci_epf_test_reg {
	u32 checksum;
	u32 irq_type;
	u32 irq_number;
	u32 flags;
} __packed;

static struct pci_epf_header test_header = {
@@ -72,13 +80,156 @@ static struct pci_epf_header test_header = {

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len)
{
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_chan *chan = epf_test->dma_chan;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct device *dev = &epf->dev;
	dma_cookie_t cookie;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	reinit_completion(&epf_test->transfer_complete);
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dmaengine_terminate_sync(chan);
		dev_err(dev, "Failed waiting for DMA completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}
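
As the kernel-doc notes, both endpoints of the copy are plain bus addresses, so a caller only has to produce a device-visible source and destination. A minimal caller sketch under that assumption (the helper name and buffer handling are hypothetical; it mirrors the write path shown further below):

/* Hypothetical caller: push a local scratch buffer to a host window. */
static int sketch_push_to_host(struct pci_epf_test *epf_test,
			       phys_addr_t host_window, size_t len)
{
	struct device *dma_dev = epf_test->epf->epc->dev.parent;
	void *buf = kzalloc(len, GFP_KERNEL);
	dma_addr_t src;
	int ret;

	if (!buf)
		return -ENOMEM;

	/* Make the local buffer visible to the EPC's DMA engine. */
	src = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, src)) {
		kfree(buf);
		return -ENOMEM;
	}

	/*
	 * host_window is a physical address from pci_epc_mem_alloc_addr(),
	 * already mapped to host memory via pci_epc_map_addr().
	 */
	ret = pci_epf_test_data_transfer(epf_test, host_window, src, len);

	dma_unmap_single(dma_dev, src, len, DMA_TO_DEVICE);
	kfree(buf);
	return ret;
}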

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to clean up EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to clean up EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	dma_release_channel(epf_test->dma_chan);
	epf_test->dma_chan = NULL;
}

static void pci_epf_test_print_rate(const char *ops, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts;
	u64 rate, ns;

	ts = timespec64_sub(*end, *start);

	/* convert both size (stored in 'rate') and time in terms of 'ns' */
	ns = timespec64_to_ns(&ts);
	rate = size * NSEC_PER_SEC;

	/* Divide both size (stored in 'rate') and ns by a common factor */
	while (ns > UINT_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return;

	/* calculate the rate */
	do_div(rate, (uint32_t)ns);

	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
}
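
The arithmetic above computes rate = size * NSEC_PER_SEC / ns in bytes per second, halving both operands until ns fits in 32 bits so do_div() can be used. A quick worked check of the same formula as standalone C (numbers illustrative):

/* Illustrative check of the rate formula, outside kernel context. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 1048576;		/* 1 MiB transferred */
	uint64_t ns = 2000000;			/* in 2 ms */
	uint64_t rate = size * 1000000000ULL;	/* size * NSEC_PER_SEC */

	/* Same scaling as the driver: keep ns within 32 bits. */
	while (ns > UINT32_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	rate /= ns;				/* bytes per second */
	printf("Rate: %llu KB/s\n", (unsigned long long)(rate / 1024));
	/* Prints 512000 KB/s, i.e. 1 MiB in 2 ms = 512 MB/s. */
	return 0;
}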

static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
@@ -117,8 +268,26 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
		goto err_dst_addr;
	}

	memcpy(dst_addr, src_addr, reg->size);
	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		memcpy(dst_addr, src_addr, reg->size);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);

err_dst_addr:
@@ -140,10 +309,14 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

@@ -169,12 +342,44 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
		goto err_map_addr;
	}

	memcpy_fromio(buf, src_addr, reg->size);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
@@ -192,10 +397,14 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

@@ -224,7 +433,38 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	memcpy_toio(dst_addr, buf, reg->size);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * wait 1 ms in order for the write to complete. Without this delay, L3
@@ -232,6 +472,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
@@ -360,14 +601,6 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
			   msecs_to_jiffies(1));
}

static void pci_epf_test_linkup(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -376,6 +609,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
	int bar;

	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epc_stop(epc);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		epf_bar = &epf->bar[bar];
@@ -424,11 +658,90 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
	return 0;
}

static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "Configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}

static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int ret;

	switch (val) {
	case CORE_INIT:
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return NOTIFY_BAD;
		break;

	case LINK_UP:
		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
				   msecs_to_jiffies(1));
		break;

	default:
		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
		return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}
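
The notifier replaces the old linkup op: the controller side fires these events through the atomic notifier chain added to struct pci_epc (see pci_epc_linkup() and pci_epc_init_notify() further below), and the function driver reacts in the callback above. A minimal sketch of the controller-to-function flow, with the trigger points taken from this series:

/* Sketch: how the two events reach pci_epf_test_notifier(). */
static void sketch_epc_event_flow(struct pci_epc *epc)
{
	/*
	 * EPF side, done once in bind when the EPC supports notification:
	 *   epf->nb.notifier_call = pci_epf_test_notifier;
	 *   pci_epc_register_notifier(epc, &epf->nb);
	 */

	/* Controller driver, after its core registers are (re)programmed: */
	pci_epc_init_notify(epc);	/* CORE_INIT -> pci_epf_test_core_init() */

	/* Controller driver, when the link with the host comes up: */
	pci_epc_linkup(epc);		/* LINK_UP -> queue cmd_handler work */
}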

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
@@ -437,13 +750,25 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)

	epc_features = epf_test->epc_features;

	if (epc_features->bar_fixed_size[test_reg_bar])
		test_reg_size = bar_size[test_reg_bar];
	else
		test_reg_size = sizeof(struct pci_epf_test_reg);
	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	base = pci_epf_alloc_space(epf, test_reg_size,
				   test_reg_bar, epc_features->align);
	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
@@ -492,14 +817,11 @@ static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	bool msix_capable = false;
	bool msi_capable = true;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;
@@ -507,8 +829,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		linkup_notifier = epc_features->linkup_notifier;
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
		core_init_notifier = epc_features->core_init_notifier;
		test_reg_bar = pci_epc_get_first_free_bar(epc_features);
		pci_epf_configure_bar(epf, epc_features);
	}
@@ -516,38 +837,28 @@ static int pci_epf_test_bind(struct pci_epf *epf)
	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "Configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	ret = pci_epf_test_set_bar(epf);
	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		return ret;
		epf_test->dma_supported = false;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	if (!linkup_notifier)
	if (linkup_notifier) {
		epf->nb.notifier_call = pci_epf_test_notifier;
		pci_epc_register_notifier(epc, &epf->nb);
	} else {
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
	}

	return 0;
}
@@ -580,7 +891,6 @@ static int pci_epf_test_probe(struct pci_epf *epf)
static struct pci_epf_ops ops = {
	.unbind = pci_epf_test_unbind,
	.bind = pci_epf_test_bind,
	.linkup = pci_epf_test_linkup,
};

static struct pci_epf_driver test_driver = {

@@ -29,7 +29,6 @@ struct pci_epc_group {
	struct config_group group;
	struct pci_epc *epc;
	bool start;
	unsigned long function_num_map;
};

static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
@@ -58,6 +57,7 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,

	if (!start) {
		pci_epc_stop(epc);
		epc_group->start = 0;
		return len;
	}

@@ -89,37 +89,22 @@ static int pci_epc_epf_link(struct config_item *epc_item,
			    struct config_item *epf_item)
{
	int ret;
	u32 func_no = 0;
	struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
	struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
	struct pci_epc *epc = epc_group->epc;
	struct pci_epf *epf = epf_group->epf;

	func_no = find_first_zero_bit(&epc_group->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG)
		return -EINVAL;

	set_bit(func_no, &epc_group->function_num_map);
	epf->func_no = func_no;

	ret = pci_epc_add_epf(epc, epf);
	if (ret)
		goto err_add_epf;
		return ret;

	ret = pci_epf_bind(epf);
	if (ret)
		goto err_epf_bind;
	if (ret) {
		pci_epc_remove_epf(epc, epf);
		return ret;
	}

	return 0;

err_epf_bind:
	pci_epc_remove_epf(epc, epf);

err_add_epf:
	clear_bit(func_no, &epc_group->function_num_map);

	return ret;
}

static void pci_epc_epf_unlink(struct config_item *epc_item,
@@ -134,7 +119,6 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,

	epc = epc_group->epc;
	epf = epf_group->epf;
	clear_bit(epf->func_no, &epc_group->function_num_map);
	pci_epf_unbind(epf);
	pci_epc_remove_epf(epc, epf);
}

@@ -120,7 +120,6 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no)
{
	const struct pci_epc_features *epc_features;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return NULL;
@@ -128,9 +127,9 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
	if (!epc->ops->get_features)
		return NULL;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	return epc_features;
}
@@ -144,14 +143,12 @@ EXPORT_SYMBOL_GPL(pci_epc_get_features);
 */
void pci_epc_stop(struct pci_epc *epc)
{
	unsigned long flags;

	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

@@ -164,7 +161,6 @@ EXPORT_SYMBOL_GPL(pci_epc_stop);
int pci_epc_start(struct pci_epc *epc)
{
	int ret;
	unsigned long flags;

	if (IS_ERR(epc))
		return -EINVAL;
@@ -172,9 +168,9 @@ int pci_epc_start(struct pci_epc *epc)
	if (!epc->ops->start)
		return 0;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	return ret;
}
@@ -193,7 +189,6 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
		      enum pci_epc_irq_type type, u16 interrupt_num)
{
	int ret;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;
@@ -201,9 +196,9 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
	if (!epc->ops->raise_irq)
		return 0;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	return ret;
}
@@ -219,7 +214,6 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
{
	int interrupt;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;
@@ -227,9 +221,9 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
	if (!epc->ops->get_msi)
		return 0;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;
@@ -252,7 +246,6 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
	int ret;
	u8 encode_int;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts > 32)
@@ -263,9 +256,9 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)

	encode_int = order_base_2(interrupts);

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, encode_int);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	return ret;
}
@@ -281,7 +274,6 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
{
	int interrupt;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;
@@ -289,9 +281,9 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
	if (!epc->ops->get_msix)
		return 0;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;
@@ -305,13 +297,15 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the endpoint function number in the EPC device
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
		     enum pci_barno bir, u32 offset)
{
	int ret;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 2048)
@@ -320,9 +314,9 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
	if (!epc->ops->set_msix)
		return 0;

	spin_lock_irqsave(&epc->lock, flags);
	ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
	mutex_unlock(&epc->lock);

	return ret;
}
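
The new bir/offset parameters let the controller locate the MSI-X table inside one of the function's own BARs instead of assuming a fixed place. The pci-epf-test core-init path above already shows the intended call; a condensed sketch of the caller's side (the wrapper name is hypothetical):

/* Sketch: configuring MSI-X with the new table-location arguments. */
static int sketch_configure_msix(struct pci_epf *epf, enum pci_barno bar,
				 u32 table_offset)
{
	/*
	 * The MSI-X table lives in 'bar' at 'table_offset' inside the
	 * space the EPF allocated (see pci_epf_test_alloc_space(), which
	 * places it right after the aligned test-register block).
	 */
	return pci_epc_set_msix(epf->epc, epf->func_no,
				epf->msix_interrupts, bar, table_offset);
}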

@@ -339,17 +333,15 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
			phys_addr_t phys_addr)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return;

	if (!epc->ops->unmap_addr)
		return;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, phys_addr);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

@@ -367,7 +359,6 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;
@@ -375,9 +366,9 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
	if (!epc->ops->map_addr)
		return 0;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	return ret;
}
@@ -394,8 +385,6 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
		       struct pci_epf_bar *epf_bar)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
@@ -404,9 +393,9 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
	if (!epc->ops->clear_bar)
		return;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, epf_bar);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

@@ -422,7 +411,6 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	unsigned long irq_flags;
	int flags = epf_bar->flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -437,9 +425,9 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
	if (!epc->ops->set_bar)
		return 0;

	spin_lock_irqsave(&epc->lock, irq_flags);
	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, epf_bar);
	spin_unlock_irqrestore(&epc->lock, irq_flags);
	mutex_unlock(&epc->lock);

	return ret;
}
@@ -460,7 +448,6 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
			 struct pci_epf_header *header)
{
	int ret;
	unsigned long flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;
@@ -468,9 +455,9 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
	if (!epc->ops->write_header)
		return 0;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, header);
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);

	return ret;
}
@@ -487,7 +474,8 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
{
	unsigned long flags;
	u32 func_no;
	int ret = 0;

	if (epf->epc)
		return -EBUSY;
@@ -495,16 +483,30 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
	if (IS_ERR(epc))
		return -EINVAL;

	if (epf->func_no > epc->max_functions - 1)
		return -EINVAL;
	mutex_lock(&epc->lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	epf->func_no = func_no;
	epf->epc = epc;

	spin_lock_irqsave(&epc->lock, flags);
	list_add_tail(&epf->list, &epc->pci_epf);
	spin_unlock_irqrestore(&epc->lock, flags);

	return 0;
ret:
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);
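
With function-number allocation moved into the core, the configfs link step no longer manages its own bitmap: pci_epc_add_epf() now picks the first free number under epc->lock and pci_epf_bind() follows. A condensed sketch of the resulting flow, mirroring pci_epc_epf_link() above (the wrapper name is hypothetical):

/* Sketch: the simplified configfs link path after this series. */
static int sketch_link_epf_to_epc(struct pci_epc *epc, struct pci_epf *epf)
{
	int ret;

	/* Core assigns epf->func_no from epc->function_num_map. */
	ret = pci_epc_add_epf(epc, epf);
	if (ret)
		return ret;

	ret = pci_epf_bind(epf);
	if (ret)
		pci_epc_remove_epf(epc, epf);	/* releases the number */

	return ret;
}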

@@ -517,15 +519,14 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
{
	unsigned long flags;

	if (!epc || IS_ERR(epc) || !epf)
		return;

	spin_lock_irqsave(&epc->lock, flags);
	mutex_lock(&epc->lock);
	clear_bit(epf->func_no, &epc->function_num_map);
	list_del(&epf->list);
	epf->epc = NULL;
	spin_unlock_irqrestore(&epc->lock, flags);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

@@ -539,19 +540,30 @@ EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	unsigned long flags;
	struct pci_epf *epf;

	if (!epc || IS_ERR(epc))
		return;

	spin_lock_irqsave(&epc->lock, flags);
	list_for_each_entry(epf, &epc->pci_epf, list)
		pci_epf_linkup(epf);
	spin_unlock_irqrestore(&epc->lock, flags);
	atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
 *			   initialization is completed.
 * @epc: the EPC device whose core initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	if (!epc || IS_ERR(epc))
		return;

	atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
@@ -610,8 +622,9 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		goto err_ret;
	}

	spin_lock_init(&epc->lock);
	mutex_init(&epc->lock);
	INIT_LIST_HEAD(&epc->pci_epf);
	ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);

	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;

@@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
	mem->page_size = page_size;
	mem->pages = pages;
	mem->size = size;
	mutex_init(&mem->lock);

	epc->mem = mem;

@@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
				     phys_addr_t *phys_addr, size_t size)
{
	int pageno;
	void __iomem *virt_addr;
	void __iomem *virt_addr = NULL;
	struct pci_epc_mem *mem = epc->mem;
	unsigned int page_shift = ilog2(mem->page_size);
	int order;
@@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
	size = ALIGN(size, mem->page_size);
	order = pci_epc_mem_get_order(mem, size);

	mutex_lock(&mem->lock);
	pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
	if (pageno < 0)
		return NULL;
		goto ret;

	*phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
	virt_addr = ioremap(*phys_addr, size);
	if (!virt_addr)
		bitmap_release_region(mem->bitmap, pageno, order);

ret:
	mutex_unlock(&mem->lock);
	return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
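
The bitmap search and the ioremap() are now serialized by mem->lock, so two functions can carve outbound windows concurrently without double-allocating a region. A minimal consumer sketch of the allocate/map/unmap/free cycle this protects (the wrapper name and host address are hypothetical):

/* Sketch: the outbound-window lifecycle that mem->lock now serializes. */
static int sketch_map_host_buffer(struct pci_epc *epc, u8 func_no,
				  u64 host_pci_addr, size_t size)
{
	phys_addr_t phys;
	void __iomem *va;
	int ret;

	va = pci_epc_mem_alloc_addr(epc, &phys, size);
	if (!va)
		return -ENOMEM;

	ret = pci_epc_map_addr(epc, func_no, phys, host_pci_addr, size);
	if (ret) {
		pci_epc_mem_free_addr(epc, phys, va, size);
		return ret;
	}

	/* ... access host memory through 'va' ... */

	pci_epc_unmap_addr(epc, func_no, phys);
	pci_epc_mem_free_addr(epc, phys, va, size);
	return 0;
}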
|
||||
@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
|
||||
pageno = (phys_addr - mem->phys_base) >> page_shift;
|
||||
size = ALIGN(size, mem->page_size);
|
||||
order = pci_epc_mem_get_order(mem, size);
|
||||
mutex_lock(&mem->lock);
|
||||
bitmap_release_region(mem->bitmap, pageno, order);
|
||||
mutex_unlock(&mem->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
|
||||
|
||||
|
@ -20,26 +20,6 @@ static DEFINE_MUTEX(pci_epf_mutex);
|
||||
static struct bus_type pci_epf_bus_type;
|
||||
static const struct device_type pci_epf_type;
|
||||
|
||||
/**
|
||||
* pci_epf_linkup() - Notify the function driver that EPC device has
|
||||
* established a connection with the Root Complex.
|
||||
* @epf: the EPF device bound to the EPC device which has established
|
||||
* the connection with the host
|
||||
*
|
||||
* Invoke to notify the function driver that EPC device has established
|
||||
* a connection with the Root Complex.
|
||||
*/
|
||||
void pci_epf_linkup(struct pci_epf *epf)
|
||||
{
|
||||
if (!epf->driver) {
|
||||
dev_WARN(&epf->dev, "epf device not bound to driver\n");
|
||||
return;
|
||||
}
|
||||
|
||||
epf->driver->ops->linkup(epf);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epf_linkup);
|
||||
|
||||
/**
|
||||
* pci_epf_unbind() - Notify the function driver that the binding between the
|
||||
* EPF device and EPC device has been lost
|
||||
@ -55,7 +35,9 @@ void pci_epf_unbind(struct pci_epf *epf)
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&epf->lock);
|
||||
epf->driver->ops->unbind(epf);
|
||||
mutex_unlock(&epf->lock);
|
||||
module_put(epf->driver->owner);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epf_unbind);
|
||||
@ -69,6 +51,8 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
|
||||
*/
|
||||
int pci_epf_bind(struct pci_epf *epf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!epf->driver) {
|
||||
dev_WARN(&epf->dev, "epf device not bound to driver\n");
|
||||
return -EINVAL;
|
||||
@ -77,7 +61,11 @@ int pci_epf_bind(struct pci_epf *epf)
|
||||
if (!try_module_get(epf->driver->owner))
|
||||
return -EAGAIN;
|
||||
|
||||
return epf->driver->ops->bind(epf);
|
||||
mutex_lock(&epf->lock);
|
||||
ret = epf->driver->ops->bind(epf);
|
||||
mutex_unlock(&epf->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epf_bind);
|
||||
|
||||
@ -99,6 +87,7 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
|
||||
epf->bar[bar].phys_addr);
|
||||
|
||||
epf->bar[bar].phys_addr = 0;
|
||||
epf->bar[bar].addr = NULL;
|
||||
epf->bar[bar].size = 0;
|
||||
epf->bar[bar].barno = 0;
|
||||
epf->bar[bar].flags = 0;
|
||||
@ -135,6 +124,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
|
||||
}
|
||||
|
||||
epf->bar[bar].phys_addr = phys_addr;
|
||||
epf->bar[bar].addr = space;
|
||||
epf->bar[bar].size = size;
|
||||
epf->bar[bar].barno = bar;
|
||||
epf->bar[bar].flags |= upper_32_bits(size) ?
|
||||
@ -214,7 +204,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
|
||||
if (!driver->ops)
|
||||
return -EINVAL;
|
||||
|
||||
if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
|
||||
if (!driver->ops->bind || !driver->ops->unbind)
|
||||
return -EINVAL;
|
||||
|
||||
driver->driver.bus = &pci_epf_bus_type;
|
||||
@ -272,6 +262,7 @@ struct pci_epf *pci_epf_create(const char *name)
|
||||
device_initialize(dev);
|
||||
dev->bus = &pci_epf_bus_type;
|
||||
dev->type = &pci_epf_type;
|
||||
mutex_init(&epf->lock);
|
||||
|
||||
ret = dev_set_name(dev, "%s", name);
|
||||
if (ret) {
|
||||
|
@ -53,7 +53,8 @@ struct pci_epc_ops {
|
||||
phys_addr_t addr);
|
||||
int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
|
||||
int (*get_msi)(struct pci_epc *epc, u8 func_no);
|
||||
int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
|
||||
int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts,
|
+			    enum pci_barno, u32 offset);
 	int	(*get_msix)(struct pci_epc *epc, u8 func_no);
 	int	(*raise_irq)(struct pci_epc *epc, u8 func_no,
 			     enum pci_epc_irq_type type, u16 interrupt_num);
@@ -71,6 +72,7 @@ struct pci_epc_ops {
  * @bitmap: bitmap to manage the PCI address space
  * @pages: number of bits representing the address region
  * @page_size: size of each page
+ * @lock: mutex to protect bitmap
  */
 struct pci_epc_mem {
 	phys_addr_t	phys_base;
@@ -78,6 +80,8 @@ struct pci_epc_mem {
 	unsigned long	*bitmap;
 	size_t		page_size;
 	int		pages;
+	/* mutex to protect against concurrent access for memory allocation*/
+	struct mutex	lock;
 };
 
 /**
@@ -88,7 +92,9 @@ struct pci_epc_mem {
  * @mem: address space of the endpoint controller
  * @max_functions: max number of functions that can be configured in this EPC
  * @group: configfs group representing the PCI EPC device
- * @lock: spinlock to protect pci_epc ops
+ * @lock: mutex to protect pci_epc ops
+ * @function_num_map: bitmap to manage physical function number
+ * @notifier: used to notify EPF of any EPC events (like linkup)
  */
 struct pci_epc {
 	struct device		dev;
@@ -97,8 +103,10 @@ struct pci_epc {
 	struct pci_epc_mem	*mem;
 	u8			max_functions;
 	struct config_group	*group;
-	/* spinlock to protect against concurrent access of EP controller */
-	spinlock_t		lock;
+	/* mutex to protect against concurrent access of EP controller */
+	struct mutex		lock;
+	unsigned long		function_num_map;
+	struct atomic_notifier_head	notifier;
 };
 
 /**
@@ -113,6 +121,7 @@ struct pci_epc {
  */
 struct pci_epc_features {
 	unsigned int	linkup_notifier : 1;
+	unsigned int	core_init_notifier : 1;
 	unsigned int	msi_capable : 1;
 	unsigned int	msix_capable : 1;
 	u8	reserved_bar;
@@ -141,6 +150,12 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
 	return dev_get_drvdata(&epc->dev);
 }
 
+static inline int
+pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&epc->notifier, nb);
+}
+
 struct pci_epc *
 __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
 		      struct module *owner);
@@ -151,6 +166,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
 void pci_epc_destroy(struct pci_epc *epc);
 int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
 void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_init_notify(struct pci_epc *epc);
 void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
 int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
 			 struct pci_epf_header *hdr);
@@ -165,7 +181,8 @@ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
 			phys_addr_t phys_addr);
 int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
 int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+		     enum pci_barno, u32 offset);
 int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
 int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
 		      enum pci_epc_irq_type type, u16 interrupt_num);
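
With the EPF linkup op replaced by an atomic notifier chain, an endpoint function driver registers a notifier block on the EPC and reacts to the CORE_INIT and LINK_UP events defined in pci-epf.h below, mirroring what pci-epf-test does in this series. A minimal sketch; the handler name and its body are illustrative, not part of the patch:

	static int my_epf_notifier(struct notifier_block *nb,
				   unsigned long val, void *data)
	{
		struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

		switch (val) {
		case CORE_INIT:
			/* write the config header, set up BARs and MSI/MSI-X */
			return NOTIFY_OK;
		case LINK_UP:
			/* start servicing host requests */
			return NOTIFY_OK;
		default:
			dev_err(&epf->dev, "invalid EPC event\n");
			return NOTIFY_BAD;
		}
	}

	/* typically done in the EPF driver's bind() callback */
	epf->nb.notifier_call = my_epf_notifier;
	pci_epc_register_notifier(epc, &epf->nb);
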
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -15,6 +15,11 @@
 
 struct pci_epf;
 
+enum pci_notify_event {
+	CORE_INIT,
+	LINK_UP,
+};
+
 enum pci_barno {
 	BAR_0,
 	BAR_1,
@@ -55,13 +60,10 @@ struct pci_epf_header {
  * @bind: ops to perform when a EPC device has been bound to EPF device
  * @unbind: ops to perform when a binding has been lost between a EPC device
  *	    and EPF device
- * @linkup: ops to perform when the EPC device has established a connection with
- *	    a host system
  */
 struct pci_epf_ops {
 	int	(*bind)(struct pci_epf *epf);
 	void	(*unbind)(struct pci_epf *epf);
-	void	(*linkup)(struct pci_epf *epf);
 };
 
 /**
@@ -92,10 +94,12 @@ struct pci_epf_driver {
 /**
  * struct pci_epf_bar - represents the BAR of EPF device
  * @phys_addr: physical address that should be mapped to the BAR
+ * @addr: virtual address corresponding to the @phys_addr
  * @size: the size of the address space present in BAR
  */
 struct pci_epf_bar {
 	dma_addr_t	phys_addr;
+	void		*addr;
 	size_t		size;
 	enum pci_barno	barno;
 	int		flags;
@@ -112,6 +116,8 @@ struct pci_epf_bar {
  * @epc: the EPC device to which this EPF device is bound
  * @driver: the EPF driver to which this EPF device is bound
  * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ * @nb: notifier block to notify EPF of any EPC events (like linkup)
+ * @lock: mutex to protect pci_epf_ops
  */
 struct pci_epf {
 	struct device		dev;
@@ -125,6 +131,22 @@ struct pci_epf {
 	struct pci_epc		*epc;
 	struct pci_epf_driver	*driver;
 	struct list_head	list;
+	struct notifier_block	nb;
+	/* mutex to protect against concurrent access of pci_epf_ops */
+	struct mutex		lock;
 };
 
+/**
+ * struct pci_epf_msix_tbl - represents the MSIX table entry structure
+ * @msg_addr: Writes to this address will trigger MSIX interrupt in host
+ * @msg_data: Data that should be written to @msg_addr to trigger MSIX interrupt
+ * @vector_ctrl: Identifies if the function is prohibited from sending a message
+ *		 using this MSIX table entry
+ */
+struct pci_epf_msix_tbl {
+	u64 msg_addr;
+	u32 msg_data;
+	u32 vector_ctrl;
+};
+
 #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
@@ -154,5 +176,4 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
 void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
 int pci_epf_bind(struct pci_epf *epf);
 void pci_epf_unbind(struct pci_epf *epf);
-void pci_epf_linkup(struct pci_epf *epf);
 #endif /* __LINUX_PCI_EPF_H */
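
Because the MSI-X table lives in the endpoint's local BAR memory rather than in PCI address space, a controller driver can index pci_epf_msix_tbl directly through the BAR's virtual address once ->set_msix() has recorded the table's BIR and offset. A rough sketch of the lookup, with illustrative variable names (the in-tree user is dw_pcie_ep_raise_msix_irq()):

	struct pci_epf_msix_tbl *msix_tbl;
	u64 msg_addr;
	u32 msg_data;

	/* bir and tbl_offset were captured when set_msix() was called */
	msix_tbl = epf_bar[bir].addr + tbl_offset;
	msg_addr = msix_tbl[interrupt_num - 1].msg_addr;
	msg_data = msix_tbl[interrupt_num - 1].msg_data;
	/* map msg_addr through an outbound window and write msg_data there */
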
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  */
 
 #ifndef _ABI_BPMP_ABI_H_
@@ -2119,6 +2119,7 @@ enum {
 	CMD_UPHY_PCIE_LANE_MARGIN_STATUS = 2,
 	CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3,
 	CMD_UPHY_PCIE_CONTROLLER_STATE = 4,
+	CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF = 5,
 	CMD_UPHY_MAX,
 };
 
@@ -2151,6 +2152,11 @@ struct cmd_uphy_pcie_controller_state_request {
 	uint8_t enable;
 } __ABI_PACKED;
 
+struct cmd_uphy_ep_controller_pll_off_request {
+	/** @brief EP controller number, valid: 0, 4, 5 */
+	uint8_t ep_controller;
+} __ABI_PACKED;
+
 /**
  * @ingroup UPHY
  * @brief Request with #MRQ_UPHY
@@ -2165,6 +2171,7 @@ struct cmd_uphy_pcie_controller_state_request {
  * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS     |                                        |
  * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request |
  * |CMD_UPHY_PCIE_CONTROLLER_STATE       |cmd_uphy_pcie_controller_state_request  |
+ * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF  |cmd_uphy_ep_controller_pll_off_request  |
  *
  */
@@ -2178,6 +2185,7 @@ struct mrq_uphy_request {
 		struct cmd_uphy_margin_control_request uphy_set_margin_control;
 		struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
 		struct cmd_uphy_pcie_controller_state_request controller_state;
+		struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
 	} __UNION_ANON;
 } __ABI_PACKED;
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -19,5 +19,13 @@
 #define PCITEST_MSIX		_IOW('P', 0x7, int)
 #define PCITEST_SET_IRQTYPE	_IOW('P', 0x8, int)
 #define PCITEST_GET_IRQTYPE	_IO('P', 0x9)
+#define PCITEST_CLEAR_IRQ	_IO('P', 0x10)
+
+#define PCITEST_FLAGS_USE_DMA	0x00000001
+
+struct pci_endpoint_test_xfer_param {
+	unsigned long size;
+	unsigned char flags;
+};
 
 #endif /* __UAPI_LINUX_PCITEST_H */
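
For userspace, the read/write/copy ioctls now take a pointer to this parameter struct instead of a bare size, which is what lets an individual transfer opt in to DMA. A hedged example of the new calling convention; the device path and size are illustrative and error handling is elided:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/pcitest.h>

	struct pci_endpoint_test_xfer_param param = {
		.size = 0x100000,		/* 1 MB transfer */
		.flags = PCITEST_FLAGS_USE_DMA,	/* request DMA for this transfer */
	};
	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

	ioctl(fd, PCITEST_WRITE, &param);
	ioctl(fd, PCITEST_CLEAR_IRQ);		/* new: release the allocated IRQs */
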
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -30,14 +30,17 @@ struct pci_test {
 	int		irqtype;
 	bool		set_irqtype;
 	bool		get_irqtype;
+	bool		clear_irq;
 	bool		read;
 	bool		write;
 	bool		copy;
 	unsigned long	size;
+	bool		use_dma;
 };
 
 static int run_test(struct pci_test *test)
 {
+	struct pci_endpoint_test_xfer_param param;
 	int ret = -EINVAL;
 	int fd;
 
@@ -74,6 +77,15 @@ static int run_test(struct pci_test *test)
 		fprintf(stdout, "%s\n", irq[ret]);
 	}
 
+	if (test->clear_irq) {
+		ret = ioctl(fd, PCITEST_CLEAR_IRQ);
+		fprintf(stdout, "CLEAR IRQ:\t\t");
+		if (ret < 0)
+			fprintf(stdout, "FAILED\n");
+		else
+			fprintf(stdout, "%s\n", result[ret]);
+	}
+
 	if (test->legacyirq) {
 		ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
 		fprintf(stdout, "LEGACY IRQ:\t");
@@ -102,7 +114,10 @@ static int run_test(struct pci_test *test)
 	}
 
 	if (test->write) {
-		ret = ioctl(fd, PCITEST_WRITE, test->size);
+		param.size = test->size;
+		if (test->use_dma)
+			param.flags = PCITEST_FLAGS_USE_DMA;
+		ret = ioctl(fd, PCITEST_WRITE, &param);
 		fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
 		if (ret < 0)
 			fprintf(stdout, "TEST FAILED\n");
@@ -111,7 +126,10 @@ static int run_test(struct pci_test *test)
 	}
 
 	if (test->read) {
-		ret = ioctl(fd, PCITEST_READ, test->size);
+		param.size = test->size;
+		if (test->use_dma)
+			param.flags = PCITEST_FLAGS_USE_DMA;
+		ret = ioctl(fd, PCITEST_READ, &param);
 		fprintf(stdout, "READ (%7ld bytes):\t\t", test->size);
 		if (ret < 0)
 			fprintf(stdout, "TEST FAILED\n");
@@ -120,7 +138,10 @@ static int run_test(struct pci_test *test)
 	}
 
 	if (test->copy) {
-		ret = ioctl(fd, PCITEST_COPY, test->size);
+		param.size = test->size;
+		if (test->use_dma)
+			param.flags = PCITEST_FLAGS_USE_DMA;
+		ret = ioctl(fd, PCITEST_COPY, &param);
 		fprintf(stdout, "COPY (%7ld bytes):\t\t", test->size);
 		if (ret < 0)
 			fprintf(stdout, "TEST FAILED\n");
@@ -153,7 +174,7 @@ int main(int argc, char **argv)
 	/* set default endpoint device */
 	test->device = "/dev/pci-endpoint-test.0";
 
-	while ((c = getopt(argc, argv, "D:b:m:x:i:Ilhrwcs:")) != EOF)
+	while ((c = getopt(argc, argv, "D:b:m:x:i:deIlhrwcs:")) != EOF)
 		switch (c) {
 		case 'D':
 			test->device = optarg;
@@ -194,9 +215,15 @@ int main(int argc, char **argv)
 		case 'c':
 			test->copy = true;
 			continue;
+		case 'e':
+			test->clear_irq = true;
+			continue;
 		case 's':
 			test->size = strtoul(optarg, NULL, 0);
 			continue;
+		case 'd':
+			test->use_dma = true;
+			continue;
 		case 'h':
 		default:
 usage:
@@ -208,7 +235,9 @@ int main(int argc, char **argv)
 			"\t-m <msi num>		MSI test (msi number between 1..32)\n"
 			"\t-x <msix num>	\tMSI-X test (msix number between 1..2048)\n"
 			"\t-i <irq type>	\tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
+			"\t-e			Clear IRQ\n"
 			"\t-I			Get current IRQ type configured\n"
+			"\t-d			Use DMA\n"
 			"\t-l			Legacy IRQ test\n"
 			"\t-r			Read buffer test\n"
 			"\t-w			Write buffer test\n"
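
Taken together, typical pcitest invocations with the new options look like this (sizes illustrative; the device defaults to /dev/pci-endpoint-test.0):

	pcitest -d -w -s 102400		# write buffer test using DMA
	pcitest -d -c -s 102400		# copy buffer test using DMA
	pcitest -e			# clear (free) the IRQs currently in use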