commit 1c6788e874
Merge tag 'soc-fsl-for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into next/drivers

FSL/NXP ARM SoC drivers updates for 4.14

Pull "FSL/NXP ARM SoC drivers updates for 4.14" from Li Yang:

This adds DPAA QBMan support for ARM SoCs and a few minor fixes/updates.
This pull request includes updates to the QMAN/BMAN drivers to make them
work on the arm/arm64 architectures in addition to the power
architecture, plus a few minor updates and bug fixes to the soc/fsl
drivers. We got the Reviewed-by from Catalin on the ARM architecture
side.

DPAA (Data Path Acceleration Architecture) is a set of hardware
components used on some FSL/NXP QorIQ networking SoCs. It provides the
infrastructure to support simplified sharing of networking interfaces
and accelerators by multiple CPU cores, and the accelerators themselves.
The QMan (Queue Manager) and BMan (Buffer Manager) are infrastructural
components within the DPAA framework; they are used to manage queues and
buffers for the various I/O interfaces and hardware accelerators.

* tag 'soc-fsl-for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
  soc/fsl/qbman: Enable FSL_LAYERSCAPE config on ARM
  soc/fsl/qbman: Add missing headers on ARM
  soc/fsl/qbman: different register offsets on ARM
  soc/fsl/qbman: add QMAN_REV32
  soc/fsl/qbman: Rework portal mapping calls for ARM/PPC
  soc/fsl/qbman: Fix ARM32 typo
  soc/fsl/qbman: Drop L1_CACHE_BYTES compile time check
  soc/fsl/qbman: Drop set/clear_bits usage
  dt-bindings: soc/fsl: Update reserved memory binding for QBMan
  soc/fsl/qbman: Use shared-dma-pool for QMan private memory allocations
  soc/fsl/qbman: Use shared-dma-pool for BMan private memory allocations
  soc/fsl/qbman: Add common routine for QBMan private allocations
  soc/fsl/guts: Add compatible string for LS1088
  soc/fsl/qman: Sleep instead of stuck hacking jiffies
Documentation/devicetree/bindings/soc/fsl/bman.txt

@@ -65,8 +65,8 @@ to the respective BMan instance
 BMan Private Memory Node
 
 BMan requires a contiguous range of physical memory used for the backing store
-for BMan Free Buffer Proxy Records (FBPR). This memory is reserved/allocated as a
-node under the /reserved-memory node
+for BMan Free Buffer Proxy Records (FBPR). This memory is reserved/allocated as
+a node under the /reserved-memory node.
 
 The BMan FBPR memory node must be named "bman-fbpr"
 
@@ -75,7 +75,9 @@ PROPERTIES
 - compatible
 	Usage:		required
 	Value type:	<stringlist>
-	Definition:	Must inclide "fsl,bman-fbpr"
+	Definition:	PPC platforms: Must include "fsl,bman-fbpr"
+			ARM platforms: Must include "shared-dma-pool"
+				       as well as the "no-map" property
 
 The following constraints are relevant to the FBPR private memory:
 - The size must be 2^(size + 1), with size = 11..33. That is 4 KiB to
@@ -100,10 +102,10 @@ The example below shows a BMan FBPR dynamic allocation memory node
 		ranges;
 
 		bman_fbpr: bman-fbpr {
-			compatible = "fsl,bman-fbpr";
-			alloc-ranges = <0 0 0x10 0>;
+			compatible = "shared-dma-pool";
 			size = <0 0x1000000>;
 			alignment = <0 0x1000000>;
+			no-map;
 		};
 	};
 
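The size constraint above is easy to get wrong when sizing a reservation. Below is a minimal standalone sketch of a validity check, assuming only the formula stated in the binding (2^(size + 1) bytes with size = 11..33, i.e. 4 KiB through 16 GiB) and the size-equal alignment used by the example node; the function name is illustrative, not from the kernel:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: validate an FBPR reservation against the binding's
 * constraint that the size be 2^(size + 1) bytes, size = 11..33.
 */
static bool fbpr_reservation_valid(uint64_t bytes, uint64_t align)
{
	int exp;

	/* must be a non-zero power of two */
	if (!bytes || (bytes & (bytes - 1)))
		return false;
	/* exponent of the power of two: bytes == 1ULL << exp */
	exp = __builtin_ctzll(bytes);
	/* size = 11..33 means exp = 12..34 (4 KiB .. 16 GiB) */
	if (exp < 12 || exp > 34)
		return false;
	/* the example node aligns the region to its own size */
	return align == bytes;
}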
Documentation/devicetree/bindings/soc/fsl/qman.txt

@@ -60,6 +60,12 @@ are located at offsets 0xbf8 and 0xbfc
 	Value type:	<prop-encoded-array>
 	Definition:	Reference input clock. Its frequency is half of the
 			platform clock
+- memory-region
+	Usage:		Required for ARM
+	Value type:	<phandle array>
+	Definition:	List of phandles referencing the QMan private memory
+			nodes (described below). The qman-fqd node must be
+			first, followed by the qman-pfdr node. Only used on ARM
 
 Devices connected to a QMan instance via Direct Connect Portals (DCP) must link
 to the respective QMan instance
@@ -74,7 +80,9 @@ QMan Private Memory Nodes
 
 QMan requires two contiguous ranges of physical memory used for the backing store
 for QMan Frame Queue Descriptor (FQD) and Packed Frame Descriptor Record (PFDR).
-This memory is reserved/allocated as a nodes under the /reserved-memory node
+This memory is reserved/allocated as nodes under the /reserved-memory node.
+
+For additional details about reserved memory regions see reserved-memory.txt
 
 The QMan FQD memory node must be named "qman-fqd"
 
@@ -83,7 +91,9 @@ PROPERTIES
 - compatible
 	Usage:		required
 	Value type:	<stringlist>
-	Definition:	Must inclide "fsl,qman-fqd"
+	Definition:	PPC platforms: Must include "fsl,qman-fqd"
+			ARM platforms: Must include "shared-dma-pool"
+				       as well as the "no-map" property
 
 The QMan PFDR memory node must be named "qman-pfdr"
 
@@ -92,7 +102,9 @@ PROPERTIES
 - compatible
 	Usage:		required
 	Value type:	<stringlist>
-	Definition:	Must inclide "fsl,qman-pfdr"
+	Definition:	PPC platforms: Must include "fsl,qman-pfdr"
+			ARM platforms: Must include "shared-dma-pool"
+				       as well as the "no-map" property
 
 The following constraints are relevant to the FQD and PFDR private memory:
 - The size must be 2^(size + 1), with size = 11..29. That is 4 KiB to
@@ -117,16 +129,16 @@ The example below shows a QMan FQD and a PFDR dynamic allocation memory nodes
 		ranges;
 
 		qman_fqd: qman-fqd {
-			compatible = "fsl,qman-fqd";
-			alloc-ranges = <0 0 0x10 0>;
+			compatible = "shared-dma-pool";
 			size = <0 0x400000>;
 			alignment = <0 0x400000>;
+			no-map;
 		};
 		qman_pfdr: qman-pfdr {
-			compatible = "fsl,qman-pfdr";
-			alloc-ranges = <0 0 0x10 0>;
+			compatible = "shared-dma-pool";
 			size = <0 0x2000000>;
 			alignment = <0 0x2000000>;
+			no-map;
 		};
 	};
 
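The 4 MiB qman-fqd example above pairs with a fixed 64-byte descriptor size; the divisor is taken from qm_get_fqid_maxcnt() in the qman_ccsr.c hunk further down. A worked sketch of the resulting frame-queue capacity (macro and function names here are illustrative):

#include <stddef.h>

#define EX_QM_FQD_ENTRY_BYTES	64	/* one Frame Queue Descriptor */

/* Illustrative capacity calculation: a 4 MiB (0x400000) FQD reservation
 * holds 0x400000 / 64 = 65536 frame queue descriptors.
 */
static unsigned int ex_fqd_capacity(size_t fqd_sz)
{
	return fqd_sz / EX_QM_FQD_ENTRY_BYTES;
}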
drivers/soc/fsl/guts.c

@@ -213,6 +213,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
 	{ .compatible = "fsl,ls1021a-dcfg", },
 	{ .compatible = "fsl,ls1043a-dcfg", },
 	{ .compatible = "fsl,ls2080a-dcfg", },
+	{ .compatible = "fsl,ls1088a-dcfg", },
 	{}
 };
 MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
drivers/soc/fsl/qbman/Kconfig

@@ -1,6 +1,6 @@
 menuconfig FSL_DPAA
 	bool "Freescale DPAA 1.x support"
-	depends on FSL_SOC_BOOKE
+	depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
 	select GENERIC_ALLOCATOR
 	help
 	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
drivers/soc/fsl/qbman/Makefile

@@ -1,6 +1,6 @@
 obj-$(CONFIG_FSL_DPAA)		+= bman_ccsr.o qman_ccsr.o \
				   bman_portal.o qman_portal.o \
-				   bman.o qman.o
+				   bman.o qman.o dpaa_sys.o
 
 obj-$(CONFIG_FSL_BMAN_TEST)	+= bman-test.o
 bman-test-y			 = bman_test.o
drivers/soc/fsl/qbman/bman.c

@@ -35,6 +35,27 @@
 
 /* Portal register assists */
 
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x3000
+#define BM_REG_RCR_CI_CINH	0x3100
+#define BM_REG_RCR_ITR		0x3200
+#define BM_REG_CFG		0x3300
+#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
+#define BM_REG_ISR		0x3e00
+#define BM_REG_IER		0x3e40
+#define BM_REG_ISDR		0x3e80
+#define BM_REG_IIR		0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+
+#else
 /* Cache-inhibited register offsets */
 #define BM_REG_RCR_PI_CINH	0x0000
 #define BM_REG_RCR_CI_CINH	0x0004
@@ -53,6 +74,7 @@
 #define BM_CL_RCR		0x1000
 #define BM_CL_RCR_PI_CENA	0x3000
 #define BM_CL_RCR_CI_CENA	0x3100
+#endif
 
 /*
  * Portal modes.
@@ -154,7 +176,8 @@ struct bm_mc {
 };
 
 struct bm_addr {
-	void __iomem *ce;	/* cache-enabled */
+	void *ce;		/* cache-enabled */
+	__be32 *ce_be;		/* Same as above but for direct access */
 	void __iomem *ci;	/* cache-inhibited */
 };
 
@@ -167,12 +190,12 @@ struct bm_portal {
 /* Cache-inhibited register access. */
 static inline u32 bm_in(struct bm_portal *p, u32 offset)
 {
-	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+	return ioread32be(p->addr.ci + offset);
 }
 
 static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
 {
-	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+	iowrite32be(val, p->addr.ci + offset);
 }
 
 /* Cache Enabled Portal Access */
@@ -188,7 +211,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
 
 static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
 {
-	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
 }
 
 struct bman_portal {
@@ -408,7 +431,7 @@ static int bm_mc_init(struct bm_portal *portal)
 
 	mc->cr = portal->addr.ce + BM_CL_CR;
 	mc->rr = portal->addr.ce + BM_CL_RR0;
-	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+	mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
 		    0 : 1;
 	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
 #ifdef CONFIG_FSL_DPAA_CHECKING
@@ -466,7 +489,7 @@ static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
 	 * its command is submitted and completed. This includes the valid-bit,
 	 * in case you were wondering...
 	 */
-	if (!__raw_readb(&rr->verb)) {
+	if (!rr->verb) {
 		dpaa_invalidate_touch_ro(rr);
 		return NULL;
 	}
@@ -512,8 +535,9 @@ static int bman_create_portal(struct bman_portal *portal,
 	 * config, everything that follows depends on it and "config" is more
 	 * for (de)reference...
 	 */
-	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
-	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	p->addr.ce = c->addr_virt_ce;
+	p->addr.ce_be = c->addr_virt_ce;
+	p->addr.ci = c->addr_virt_ci;
 	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
 		dev_err(c->dev, "RCR initialisation failed\n");
 		goto fail_rcr;
@@ -607,7 +631,7 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
 	unsigned long irqflags;
 
 	local_irq_save(irqflags);
-	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+	p->irq_sources |= bits & BM_PIRQ_VISIBLE;
 	bm_out(&p->p, BM_REG_IER, p->irq_sources);
 	local_irq_restore(irqflags);
 	return 0;
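A note on the accessor rework visible in the bm_in()/bm_out() hunks above: __raw_readl()/__raw_writel() plus an explicit byte swap baked in PPC assumptions, while ioread32be()/iowrite32be() express the same big-endian register access portably across PPC and ARM portals. A minimal sketch of the pattern (function names are illustrative, not from the driver):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative big-endian MMIO accessors mirroring the bm_in()/bm_out()
 * change: the byte order is stated once, in the accessor, instead of a
 * raw access followed by a manual swap.
 */
static inline u32 ex_portal_read(void __iomem *ci, u32 offset)
{
	return ioread32be(ci + offset);
}

static inline void ex_portal_write(void __iomem *ci, u32 offset, u32 val)
{
	iowrite32be(val, ci + offset);
}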
drivers/soc/fsl/qbman/bman_ccsr.c

@@ -201,6 +201,21 @@ static int fsl_bman_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
+	/*
+	 * If FBPR memory wasn't defined using the qbman compatible string
+	 * try using the of_reserved_mem_device method
+	 */
+	if (!fbpr_a) {
+		ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+		if (ret) {
+			dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+	}
+
+	dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
 	bm_set_memory(fbpr_a, fbpr_sz);
 
 	err_irq = platform_get_irq(pdev, 0);
drivers/soc/fsl/qbman/bman_portal.c

@@ -91,7 +91,6 @@ static int bman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct bm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	void __iomem *va;
 	int irq, cpu;
 
 	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
@@ -123,23 +122,21 @@ static int bman_portal_probe(struct platform_device *pdev)
 	}
 	pcfg->irq = irq;
 
-	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-	if (!va) {
-		dev_err(dev, "ioremap::CE failed\n");
+	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+				      resource_size(addr_phys[0]),
+				      QBMAN_MEMREMAP_ATTR);
+	if (!pcfg->addr_virt_ce) {
+		dev_err(dev, "memremap::CE failed\n");
 		goto err_ioremap1;
 	}
 
-	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
-	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
-			  _PAGE_GUARDED | _PAGE_NO_CACHE);
-	if (!va) {
+	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+				     resource_size(addr_phys[1]));
+	if (!pcfg->addr_virt_ci) {
 		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
 	}
 
-	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
 	spin_lock(&bman_lock);
 	cpu = cpumask_next_zero(-1, &portal_cpus);
 	if (cpu >= nr_cpu_ids) {
@@ -164,9 +161,9 @@ static int bman_portal_probe(struct platform_device *pdev)
 	return 0;
 
 err_portal_init:
-	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+	iounmap(pcfg->addr_virt_ci);
 err_ioremap2:
-	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+	memunmap(pcfg->addr_virt_ce);
 err_ioremap1:
 	return -ENXIO;
 }
drivers/soc/fsl/qbman/bman_priv.h

@@ -46,11 +46,9 @@ extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */
 extern struct gen_pool *bm_bpalloc;
 
 struct bm_portal_config {
-	/*
-	 * Corenet portal addresses;
-	 * [0]==cache-enabled, [1]==cache-inhibited.
-	 */
-	void __iomem *addr_virt[2];
+	/* Portal addresses */
+	void *addr_virt_ce;
+	void __iomem *addr_virt_ci;
 	/* Allow these to be joined in lists */
 	struct list_head list;
 	struct device *dev;
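The struct change above is the crux of the portal-mapping rework: the cache-enabled area is no longer treated as MMIO. A sketch of the distinction as inferred from this series (illustrative struct, not the driver's):

#include <linux/io.h>

/* Illustrative: the two portal regions now carry different pointer types
 * because they have different memory semantics.
 */
struct ex_portal_map {
	void *ce;		/* memremap(): behaves like normal memory,
				 * may be dereferenced directly (see ce_be) */
	void __iomem *ci;	/* ioremap(): device registers, accessed
				 * only through the io accessors */
};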
drivers/soc/fsl/qbman/dpaa_sys.c (new file, 78 lines)

@@ -0,0 +1,78 @@
+/* Copyright 2017 NXP Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of NXP Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "dpaa_sys.h"
+
+/*
+ * Initialize a device's private memory region
+ */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+			   size_t *size)
+{
+	int ret;
+	struct device_node *mem_node;
+	u64 size64;
+
+	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
+	if (ret) {
+		dev_err(dev,
+			"of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
+			idx, ret);
+		return -ENODEV;
+	}
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (mem_node) {
+		ret = of_property_read_u64(mem_node, "size", &size64);
+		if (ret) {
+			dev_err(dev, "of_property_read_u64(size) fails 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+		*size = size64;
+	} else {
+		dev_err(dev, "No memory-region found for index %d\n", idx);
+		return -ENODEV;
+	}
+
+	if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+		dev_err(dev, "DMA Alloc memory failed\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Disassociate the reserved memory area from the device
+	 * because a device can only have one DMA memory area. This
+	 * should be fine since the memory is allocated and initialized
+	 * and only ever accessed by the QBMan device from now on
+	 */
+	of_reserved_mem_device_release(dev);
+	return 0;
+}
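A condensed usage sketch of the helper above, following the callers added later in this merge (the bman/qman CCSR probes); the wrapping function is illustrative. Region index 0 must be the FQD (or FBPR) node and index 1 the PFDR node, matching the phandle order documented in qman.txt:

#include "dpaa_sys.h"

/* Illustrative caller: allocate the QMan private memory areas in the
 * documented order, FQD first (index 0), then PFDR (index 1).
 */
static int ex_alloc_qman_private_mem(struct device *dev)
{
	dma_addr_t fqd_a, pfdr_a;
	size_t fqd_sz, pfdr_sz;
	int ret;

	ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);	/* qman-fqd */
	if (ret)
		return ret;
	return qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz); /* qman-pfdr */
}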
drivers/soc/fsl/qbman/dpaa_sys.h

@@ -44,23 +44,21 @@
 #include <linux/prefetch.h>
 #include <linux/genalloc.h>
 #include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
 /* For 2-element tables related to cache-inhibited and cache-enabled mappings */
 #define DPAA_PORTAL_CE 0
 #define DPAA_PORTAL_CI 1
 
-#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
-#error "Unsupported Cacheline Size"
-#endif
-
 static inline void dpaa_flush(void *p)
 {
+	/*
+	 * Only PPC needs to flush the cache currently - on ARM the mapping
+	 * is non cacheable
+	 */
 #ifdef CONFIG_PPC
 	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM)
+	__cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+	__flush_dcache_area(p, 64);
 #endif
 }
 
@@ -102,4 +100,15 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
 /* Offset applied to genalloc pools due to zero being an error return */
 #define DPAA_GENALLOC_OFF	0x80000000
 
+/* Initialize a device's private memory region */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+			   size_t *size);
+
+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR	MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR	MEMREMAP_WC
+#endif
+
 #endif	/* __DPAA_SYS_H */
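A usage sketch for QBMAN_MEMREMAP_ATTR, condensed from the portal probes in this merge (the wrapper function is illustrative): PPC keeps a write-back mapping of the cache-enabled portal area, everything else gets write-combine.

#include <linux/io.h>
#include <linux/ioport.h>
#include "dpaa_sys.h"

/* Illustrative wrapper: map a cache-enabled portal region with the
 * platform-appropriate semantics (MEMREMAP_WB on PPC, MEMREMAP_WC
 * otherwise), as the bman/qman portal probes do.
 */
static void *ex_map_portal_ce(struct resource *res)
{
	return memremap(res->start, resource_size(res), QBMAN_MEMREMAP_ATTR);
}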
drivers/soc/fsl/qbman/qman.c

@@ -41,6 +41,43 @@
 
 /* Portal register assists */
 
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x3000
+#define QM_REG_EQCR_CI_CINH	0x3040
+#define QM_REG_EQCR_ITR		0x3080
+#define QM_REG_DQRR_PI_CINH	0x3100
+#define QM_REG_DQRR_CI_CINH	0x3140
+#define QM_REG_DQRR_ITR		0x3180
+#define QM_REG_DQRR_DCAP	0x31C0
+#define QM_REG_DQRR_SDQCR	0x3200
+#define QM_REG_DQRR_VDQCR	0x3240
+#define QM_REG_DQRR_PDQCR	0x3280
+#define QM_REG_MR_PI_CINH	0x3300
+#define QM_REG_MR_CI_CINH	0x3340
+#define QM_REG_MR_ITR		0x3380
+#define QM_REG_CFG		0x3500
+#define QM_REG_ISR		0x3600
+#define QM_REG_IER		0x3640
+#define QM_REG_ISDR		0x3680
+#define QM_REG_IIR		0x36C0
+#define QM_REG_ITPR		0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3040
+#define QM_CL_DQRR_PI_CENA	0x3100
+#define QM_CL_DQRR_CI_CENA	0x3140
+#define QM_CL_MR_PI_CENA	0x3300
+#define QM_CL_MR_CI_CENA	0x3340
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+
+#else
 /* Cache-inhibited register offsets */
 #define QM_REG_EQCR_PI_CINH	0x0000
 #define QM_REG_EQCR_CI_CINH	0x0004
@@ -75,6 +112,7 @@
 #define QM_CL_CR		0x3800
 #define QM_CL_RR0		0x3900
 #define QM_CL_RR1		0x3940
+#endif
 
 /*
  * BTW, the drivers (and h/w programming model) already obtain the required
@@ -300,7 +338,8 @@ struct qm_mc {
 };
 
 struct qm_addr {
-	void __iomem *ce;	/* cache-enabled */
+	void *ce;		/* cache-enabled */
+	__be32 *ce_be;		/* same value as above but for direct access */
 	void __iomem *ci;	/* cache-inhibited */
 };
 
@@ -321,12 +360,12 @@ struct qm_portal {
 /* Cache-inhibited register access. */
 static inline u32 qm_in(struct qm_portal *p, u32 offset)
 {
-	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+	return ioread32be(p->addr.ci + offset);
 }
 
 static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
 {
-	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+	iowrite32be(val, p->addr.ci + offset);
 }
 
 /* Cache Enabled Portal Access */
@@ -342,7 +381,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
 
 static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
 {
-	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
 }
 
 /* --- EQCR API --- */
@@ -646,11 +685,7 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
 	 */
 	dpaa_invalidate_touch_ro(res);
 #endif
-	/*
-	 * when accessing 'verb', use __raw_readb() to ensure that compiler
-	 * inlining doesn't try to optimise out "excess reads".
-	 */
-	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
 		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
 		if (!dqrr->pi)
 			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
@@ -777,11 +812,8 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
 	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
 
 	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
-	/*
-	 * when accessing 'verb', use __raw_readb() to ensure that compiler
-	 * inlining doesn't try to optimise out "excess reads".
-	 */
-	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+
+	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
 		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
 		if (!mr->pi)
 			mr->vbit ^= QM_MR_VERB_VBIT;
@@ -822,7 +854,7 @@ static inline int qm_mc_init(struct qm_portal *portal)
 
 	mc->cr = portal->addr.ce + QM_CL_CR;
 	mc->rr = portal->addr.ce + QM_CL_RR0;
-	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+	mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
 		    ? 0 : 1;
 	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
 #ifdef CONFIG_FSL_DPAA_CHECKING
@@ -880,7 +912,7 @@ static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
 	 * its command is submitted and completed. This includes the valid-bit,
 	 * in case you were wondering...
 	 */
-	if (!__raw_readb(&rr->verb)) {
+	if (!rr->verb) {
 		dpaa_invalidate_touch_ro(rr);
 		return NULL;
 	}
@@ -909,12 +941,12 @@ static inline int qm_mc_result_timeout(struct qm_portal *portal,
 
 static inline void fq_set(struct qman_fq *fq, u32 mask)
 {
-	set_bits(mask, &fq->flags);
+	fq->flags |= mask;
 }
 
 static inline void fq_clear(struct qman_fq *fq, u32 mask)
 {
-	clear_bits(mask, &fq->flags);
+	fq->flags &= ~mask;
 }
 
 static inline int fq_isset(struct qman_fq *fq, u32 mask)
@@ -1084,11 +1116,7 @@ static int drain_mr_fqrni(struct qm_portal *p)
 		 * entries well before the ring has been fully consumed, so
 		 * we're being *really* paranoid here.
 		 */
-		u64 now, then = jiffies;
-
-		do {
-			now = jiffies;
-		} while ((then + 10000) > now);
+		msleep(1);
 		msg = qm_mr_current(p);
 		if (!msg)
 			return 0;
@@ -1124,8 +1152,9 @@ static int qman_create_portal(struct qman_portal *portal,
 	 * config, everything that follows depends on it and "config" is more
 	 * for (de)reference
 	 */
-	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
-	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	p->addr.ce = c->addr_virt_ce;
+	p->addr.ce_be = c->addr_virt_ce;
+	p->addr.ci = c->addr_virt_ci;
 	/*
 	 * If CI-stashing is used, the current defaults use a threshold of 3,
 	 * and stash with high-than-DQRR priority.
@@ -1566,7 +1595,7 @@ void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
 	unsigned long irqflags;
 
 	local_irq_save(irqflags);
-	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+	p->irq_sources |= bits & QM_PIRQ_VISIBLE;
 	qm_out(&p->p, QM_REG_IER, p->irq_sources);
 	local_irq_restore(irqflags);
 }
@@ -1589,7 +1618,7 @@ void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
 	 */
 	local_irq_save(irqflags);
 	bits &= QM_PIRQ_VISIBLE;
-	clear_bits(bits, &p->irq_sources);
+	p->irq_sources &= ~bits;
 	qm_out(&p->p, QM_REG_IER, p->irq_sources);
 	ier = qm_in(&p->p, QM_REG_IER);
 	/*
drivers/soc/fsl/qbman/qman_ccsr.c

@@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
 }
 
 /*
- * Ideally we would use the DMA API to turn rmem->base into a DMA address
- * (especially if iommu translations ever get involved). Unfortunately, the
- * DMA API currently does not allow mapping anything that is not backed with
- * a struct page.
+ * QMan needs two global memory areas initialized at boot time:
+ *  1) FQD: Frame Queue Descriptors used to manage frame queues
+ *  2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved memory framework
+ * and the addresses and sizes are initialized when the QMan device is probed
 */
 static dma_addr_t fqd_a, pfdr_a;
 static size_t fqd_sz, pfdr_sz;
 
+#ifdef CONFIG_PPC
+/*
+ * Support for PPC Device Tree backward compatibility when compatible
+ * string is set to fsl-qman-fqd and fsl-qman-pfdr
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+	/* map as cacheable, non-guarded */
+	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+	if (!tmpp)
+		return -ENOMEM;
+
+	memset_io(tmpp, 0, sz);
+	flush_dcache_range((unsigned long)tmpp,
+			   (unsigned long)tmpp + sz);
+	iounmap(tmpp);
+
+	return 0;
+}
+
 static int qman_fqd(struct reserved_mem *rmem)
 {
 	fqd_a = rmem->base;
 	fqd_sz = rmem->size;
 
 	WARN_ON(!(fqd_a && fqd_sz));
 
 	return 0;
 }
 RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
@@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem *rmem)
 }
 RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
 
+#endif
+
 static unsigned int qm_get_fqid_maxcnt(void)
 {
 	return fqd_sz / 64;
 }
 
-/*
- * Flush this memory range from data cache so that QMAN originated
- * transactions for this memory region could be marked non-coherent.
- */
-static int zero_priv_mem(struct device *dev, struct device_node *node,
-			 phys_addr_t addr, size_t sz)
-{
-	/* map as cacheable, non-guarded */
-	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
-
-	if (!tmpp)
-		return -ENOMEM;
-
-	memset_io(tmpp, 0, sz);
-	flush_dcache_range((unsigned long)tmpp,
-			   (unsigned long)tmpp + sz);
-	iounmap(tmpp);
-
-	return 0;
-}
-
 static void log_edata_bits(struct device *dev, u32 bit_count)
 {
 	u32 i, j, mask = 0xffffffff;
@@ -717,6 +719,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
 		qman_ip_rev = QMAN_REV30;
 	else if (major == 3 && minor == 1)
 		qman_ip_rev = QMAN_REV31;
+	else if (major == 3 && minor == 2)
+		qman_ip_rev = QMAN_REV32;
 	else {
 		dev_err(dev, "Unknown QMan version\n");
 		return -ENODEV;
@@ -727,10 +731,41 @@ static int fsl_qman_probe(struct platform_device *pdev)
 		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
 	}
 
-	ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
-	WARN_ON(ret);
-	if (ret)
-		return -ENODEV;
+	if (fqd_a) {
+#ifdef CONFIG_PPC
+		/*
+		 * For PPC backward DT compatibility
+		 * FQD memory MUST be zero'd by software
+		 */
+		zero_priv_mem(fqd_a, fqd_sz);
+#else
+		WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+	} else {
+		/*
+		 * Order of memory regions is assumed as FQD followed by PFDR;
+		 * to ensure allocations come from the correct regions, the
+		 * driver initializes then allocates each piece in order
+		 */
+		ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+		if (ret) {
+			dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+	}
+	dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+	if (!pfdr_a) {
+		/* Setup PFDR memory */
+		ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+		if (ret) {
+			dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+	}
+	dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
 
 	ret = qman_init_ccsr(dev);
 	if (ret) {
drivers/soc/fsl/qbman/qman_portal.c

@@ -224,7 +224,6 @@ static int qman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct qm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	void __iomem *va;
 	int irq, cpu, err;
 	u32 val;
 
@@ -262,23 +261,21 @@ static int qman_portal_probe(struct platform_device *pdev)
 	}
 	pcfg->irq = irq;
 
-	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-	if (!va) {
-		dev_err(dev, "ioremap::CE failed\n");
+	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+				      resource_size(addr_phys[0]),
+				      QBMAN_MEMREMAP_ATTR);
+	if (!pcfg->addr_virt_ce) {
+		dev_err(dev, "memremap::CE failed\n");
 		goto err_ioremap1;
 	}
 
-	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
-	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
-			  _PAGE_GUARDED | _PAGE_NO_CACHE);
-	if (!va) {
+	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+				     resource_size(addr_phys[1]));
+	if (!pcfg->addr_virt_ci) {
 		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
 	}
 
-	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
 	pcfg->pools = qm_get_pools_sdqcr();
 
 	spin_lock(&qman_lock);
@@ -310,9 +307,9 @@ static int qman_portal_probe(struct platform_device *pdev)
 	return 0;
 
 err_portal_init:
-	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+	iounmap(pcfg->addr_virt_ci);
 err_ioremap2:
-	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+	memunmap(pcfg->addr_virt_ce);
 err_ioremap1:
 	return -ENXIO;
 }
drivers/soc/fsl/qbman/qman_priv.h

@@ -28,8 +28,6 @@
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
-
 #include "dpaa_sys.h"
 
 #include <soc/fsl/qman.h>
@@ -155,11 +153,9 @@ static inline void qman_cgrs_xor(struct qman_cgrs *dest,
 void qman_init_cgr_all(void);
 
 struct qm_portal_config {
-	/*
-	 * Corenet portal addresses;
-	 * [0]==cache-enabled, [1]==cache-inhibited.
-	 */
-	void __iomem *addr_virt[2];
+	/* Portal addresses */
+	void *addr_virt_ce;
+	void __iomem *addr_virt_ci;
 	struct device *dev;
 	struct iommu_domain *iommu_domain;
 	/* Allow these to be joined in lists */
@@ -187,6 +183,7 @@ struct qm_portal_config {
 #define QMAN_REV20 0x0200
 #define QMAN_REV30 0x0300
 #define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
 extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
 
 #define QM_FQID_RANGE_START	1	/* FQID 0 reserved for internal use */
drivers/soc/fsl/qbman/qman_test.h

@@ -30,7 +30,5 @@
 
 #include "qman_priv.h"
 
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
-
 int qman_test_stash(void);
 int qman_test_api(void);