2019-06-04 15:11:33 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2009-01-26 20:13:40 +07:00
|
|
|
/*
|
|
|
|
* omap iommu: tlb and pagetable primitives
|
|
|
|
*
|
2010-02-16 01:03:32 +07:00
|
|
|
* Copyright (C) 2008-2010 Nokia Corporation
|
2017-09-06 05:56:18 +07:00
|
|
|
* Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
|
2009-01-26 20:13:40 +07:00
|
|
|
*
|
|
|
|
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
|
|
|
|
* Paul Mundt and Toshihiro Kobayashi
|
|
|
|
*/
|
|
|
|
|
2017-07-29 03:49:14 +07:00
|
|
|
#include <linux/dma-mapping.h>
|
2009-01-26 20:13:40 +07:00
|
|
|
#include <linux/err.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 15:04:11 +07:00
|
|
|
#include <linux/slab.h>
|
2009-01-26 20:13:40 +07:00
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/platform_device.h>
|
2011-06-02 05:46:12 +07:00
|
|
|
#include <linux/iommu.h>
|
2012-11-03 02:24:03 +07:00
|
|
|
#include <linux/omap-iommu.h>
|
2011-06-02 05:46:12 +07:00
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/spinlock.h>
|
2012-11-03 02:24:06 +07:00
|
|
|
#include <linux/io.h>
|
2012-11-20 08:05:51 +07:00
|
|
|
#include <linux/pm_runtime.h>
|
2014-03-01 03:42:36 +07:00
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/of_iommu.h>
|
|
|
|
#include <linux/of_irq.h>
|
2014-09-05 05:27:30 +07:00
|
|
|
#include <linux/of_platform.h>
|
2015-10-03 06:02:44 +07:00
|
|
|
#include <linux/regmap.h>
|
|
|
|
#include <linux/mfd/syscon.h>
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2012-11-03 02:24:14 +07:00
|
|
|
#include <linux/platform_data/iommu-omap.h>
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2012-11-03 02:24:00 +07:00
|
|
|
#include "omap-iopgtable.h"
|
2012-11-03 02:24:06 +07:00
|
|
|
#include "omap-iommu.h"
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2017-04-12 12:21:30 +07:00
|
|
|
static const struct iommu_ops omap_iommu_ops;
|
|
|
|
|
2019-08-07 15:26:51 +07:00
|
|
|
/*
 * struct orphan_dev - client device probed before its IOMMU driver
 * @dev:  the client device waiting for its IOMMU
 * @node: link into the global orphan_dev_list (guarded by orphan_lock)
 *
 * NOTE(review): presumably entries are drained later via
 * _omap_iommu_add_device() once the IOMMU probes — confirm against the
 * list consumer, which is outside this chunk.
 */
struct orphan_dev {
	struct device *dev;
	struct list_head node;
};
|
|
|
|
|
|
|
|
static LIST_HEAD(orphan_dev_list);
|
|
|
|
|
|
|
|
static DEFINE_SPINLOCK(orphan_lock);
|
|
|
|
|
2019-04-23 14:50:08 +07:00
|
|
|
#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
|
2014-03-18 08:31:34 +07:00
|
|
|
|
2011-11-10 16:32:27 +07:00
|
|
|
/* bitmap of the page sizes currently supported */
|
|
|
|
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
|
|
|
|
|
2012-11-03 02:24:09 +07:00
|
|
|
#define MMU_LOCK_BASE_SHIFT 10
|
|
|
|
#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
|
|
|
|
#define MMU_LOCK_BASE(x) \
|
|
|
|
((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
|
|
|
|
|
|
|
|
#define MMU_LOCK_VICT_SHIFT 4
|
|
|
|
#define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT)
|
|
|
|
#define MMU_LOCK_VICT(x) \
|
|
|
|
((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
|
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
static struct platform_driver omap_iommu_driver;
|
|
|
|
static struct kmem_cache *iopte_cachep;
|
|
|
|
|
2019-08-07 15:26:51 +07:00
|
|
|
static int _omap_iommu_add_device(struct device *dev);
|
|
|
|
|
2015-03-26 19:43:09 +07:00
|
|
|
/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 *
 * The generic struct iommu_domain is embedded as the 'domain' member of
 * struct omap_iommu_domain, so this is a pure pointer adjustment.
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}
|
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
/**
|
2011-08-18 02:57:56 +07:00
|
|
|
* omap_iommu_save_ctx - Save registers for pm off-mode support
|
2011-10-11 05:18:33 +07:00
|
|
|
* @dev: client device
|
2019-08-07 15:26:49 +07:00
|
|
|
*
|
|
|
|
* This should be treated as an deprecated API. It is preserved only
|
|
|
|
* to maintain existing functionality for OMAP3 ISP driver.
|
2009-01-26 20:13:40 +07:00
|
|
|
**/
|
2011-10-11 05:18:33 +07:00
|
|
|
void omap_iommu_save_ctx(struct device *dev)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
2017-09-06 05:56:18 +07:00
|
|
|
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
|
|
|
|
struct omap_iommu *obj;
|
|
|
|
u32 *p;
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
int i;
|
2011-10-11 05:18:33 +07:00
|
|
|
|
2017-09-06 05:56:18 +07:00
|
|
|
if (!arch_data)
|
|
|
|
return;
|
|
|
|
|
|
|
|
while (arch_data->iommu_dev) {
|
|
|
|
obj = arch_data->iommu_dev;
|
|
|
|
p = obj->ctx;
|
|
|
|
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
|
|
|
|
p[i] = iommu_read_reg(obj, i * sizeof(u32));
|
|
|
|
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
|
|
|
|
p[i]);
|
|
|
|
}
|
|
|
|
arch_data++;
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
}
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
2011-08-18 02:57:56 +07:00
|
|
|
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
|
2009-01-26 20:13:40 +07:00
|
|
|
|
|
|
|
/**
|
2011-08-18 02:57:56 +07:00
|
|
|
* omap_iommu_restore_ctx - Restore registers for pm off-mode support
|
2011-10-11 05:18:33 +07:00
|
|
|
* @dev: client device
|
2019-08-07 15:26:49 +07:00
|
|
|
*
|
|
|
|
* This should be treated as an deprecated API. It is preserved only
|
|
|
|
* to maintain existing functionality for OMAP3 ISP driver.
|
2009-01-26 20:13:40 +07:00
|
|
|
**/
|
2011-10-11 05:18:33 +07:00
|
|
|
void omap_iommu_restore_ctx(struct device *dev)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
2017-09-06 05:56:18 +07:00
|
|
|
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
|
|
|
|
struct omap_iommu *obj;
|
|
|
|
u32 *p;
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
int i;
|
2011-10-11 05:18:33 +07:00
|
|
|
|
2017-09-06 05:56:18 +07:00
|
|
|
if (!arch_data)
|
|
|
|
return;
|
|
|
|
|
|
|
|
while (arch_data->iommu_dev) {
|
|
|
|
obj = arch_data->iommu_dev;
|
|
|
|
p = obj->ctx;
|
|
|
|
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
|
|
|
|
iommu_write_reg(obj, p[i], i * sizeof(u32));
|
|
|
|
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
|
|
|
|
p[i]);
|
|
|
|
}
|
|
|
|
arch_data++;
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
}
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
2011-08-18 02:57:56 +07:00
|
|
|
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2015-10-03 06:02:44 +07:00
|
|
|
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
|
|
|
|
{
|
|
|
|
u32 val, mask;
|
|
|
|
|
|
|
|
if (!obj->syscfg)
|
|
|
|
return;
|
|
|
|
|
|
|
|
mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
|
|
|
|
val = enable ? mask : 0;
|
|
|
|
regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
|
|
|
|
}
|
|
|
|
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
|
|
|
|
{
|
|
|
|
u32 l = iommu_read_reg(obj, MMU_CNTL);
|
|
|
|
|
|
|
|
if (on)
|
|
|
|
iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
|
|
|
|
else
|
|
|
|
iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
|
|
|
|
|
|
|
|
l &= ~MMU_CNTL_MASK;
|
|
|
|
if (on)
|
|
|
|
l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
|
|
|
|
else
|
|
|
|
l |= (MMU_CNTL_MMU_EN);
|
|
|
|
|
|
|
|
iommu_write_reg(obj, l, MMU_CNTL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int omap2_iommu_enable(struct omap_iommu *obj)
|
|
|
|
{
|
|
|
|
u32 l, pa;
|
|
|
|
|
|
|
|
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
pa = virt_to_phys(obj->iopgd);
|
|
|
|
if (!IS_ALIGNED(pa, SZ_16K))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
l = iommu_read_reg(obj, MMU_REVISION);
|
|
|
|
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
|
|
|
|
(l >> 4) & 0xf, l & 0xf);
|
|
|
|
|
|
|
|
iommu_write_reg(obj, pa, MMU_TTB);
|
|
|
|
|
2015-10-03 06:02:44 +07:00
|
|
|
dra7_cfg_dspsys_mmu(obj, true);
|
|
|
|
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
if (obj->has_bus_err_back)
|
|
|
|
iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
|
|
|
|
|
|
|
|
__iommu_set_twl(obj, true);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void omap2_iommu_disable(struct omap_iommu *obj)
|
|
|
|
{
|
|
|
|
u32 l = iommu_read_reg(obj, MMU_CNTL);
|
|
|
|
|
|
|
|
l &= ~MMU_CNTL_MASK;
|
|
|
|
iommu_write_reg(obj, l, MMU_CNTL);
|
2015-10-03 06:02:44 +07:00
|
|
|
dra7_cfg_dspsys_mmu(obj, false);
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
|
|
|
|
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
|
|
|
|
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
static int iommu_enable(struct omap_iommu *obj)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
2019-08-07 15:26:47 +07:00
|
|
|
int ret;
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2019-08-07 15:26:47 +07:00
|
|
|
ret = pm_runtime_get_sync(obj->dev);
|
|
|
|
if (ret < 0)
|
|
|
|
pm_runtime_put_noidle(obj->dev);
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2019-08-07 15:26:47 +07:00
|
|
|
return ret < 0 ? ret : 0;
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
static void iommu_disable(struct omap_iommu *obj)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
2012-11-20 08:05:51 +07:00
|
|
|
pm_runtime_put_sync(obj->dev);
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TLB operations
|
|
|
|
*/
|
2011-08-16 18:58:14 +07:00
|
|
|
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
|
|
|
|
u32 mask = get_cam_va_mask(cr->cam & page_size);
|
|
|
|
|
|
|
|
return cr->cam & mask;
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static u32 get_iopte_attr(struct iotlb_entry *e)
|
|
|
|
{
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
u32 attr;
|
|
|
|
|
|
|
|
attr = e->mixed << 5;
|
|
|
|
attr |= e->endian;
|
|
|
|
attr |= e->elsz >> 3;
|
|
|
|
attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
|
|
|
|
(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
|
|
|
|
return attr;
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
u32 status, fault_addr;
|
|
|
|
|
|
|
|
status = iommu_read_reg(obj, MMU_IRQSTATUS);
|
|
|
|
status &= MMU_IRQ_MASK;
|
|
|
|
if (!status) {
|
|
|
|
*da = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
|
|
|
|
*da = fault_addr;
|
|
|
|
|
|
|
|
iommu_write_reg(obj, status, MMU_IRQSTATUS);
|
|
|
|
|
|
|
|
return status;
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
|
|
|
|
2015-07-21 05:33:25 +07:00
|
|
|
void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
|
|
|
u32 val;
|
|
|
|
|
|
|
|
val = iommu_read_reg(obj, MMU_LOCK);
|
|
|
|
|
|
|
|
l->base = MMU_LOCK_BASE(val);
|
|
|
|
l->vict = MMU_LOCK_VICT(val);
|
|
|
|
}
|
|
|
|
|
2015-07-21 05:33:25 +07:00
|
|
|
void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
|
|
|
u32 val;
|
|
|
|
|
|
|
|
val = (l->base << MMU_LOCK_BASE_SHIFT);
|
|
|
|
val |= (l->vict << MMU_LOCK_VICT_SHIFT);
|
|
|
|
|
|
|
|
iommu_write_reg(obj, val, MMU_LOCK);
|
|
|
|
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iotlb_read_cr - read the CAM and RAM read-back registers into @cr.
 * Callers select the TLB entry to read by programming the lock/victim
 * register first (see __iotlb_read_cr()).
 */
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iotlb_load_cr - load a TLB entry from @cr.
 * Writes CAM (forcing the valid bit on) and RAM, then issues the
 * flush-entry and load-TLB commands. The register write order follows
 * the hardware programming sequence and must not be rearranged.
 */
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
|
|
|
|
|
2010-04-27 12:37:12 +07:00
|
|
|
/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	/* point the victim register at entry @n, then read that entry */
	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
|
|
|
|
|
iommu/omap: Consolidate OMAP IOMMU modules
The OMAP IOMMU driver was originally designed as modules, and split
into a core module and a thin arch-specific module through the OMAP
arch-specific struct iommu_functions, to scale for both OMAP1 and
OMAP2+ IOMMU variants. The driver can only be built for OMAP2+
platforms currently, and also can only be built-in after the
adaptation to generic IOMMU API. The OMAP1 variant was never added
and will most probably be never added (the code for the only potential
user, its parent, DSP processor has already been cleaned up). So,
consolidate the OMAP2 specific omap-iommu2 module into the core OMAP
IOMMU driver - this eliminates the arch-specific ops structure and
simplifies the driver into a single module that only implements the
generic IOMMU API's iommu_ops.
The following are the main changes:
- omap-iommu2 module is completely eliminated, with the common
definitions moved to the internal omap-iommu.h, and the ops
implementations moved into omap-iommu.c
- OMAP arch-specific struct iommu_functions is also eliminated,
with the ops implementations directly absorbed into the calling
functions
- iotlb_alloc_cr() is no longer inlined and defined only when
PREFETCH_IOTLB is defined
- iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG
is defined
- Elimination of the OMAP IOMMU exported functions to register the
arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch()
- Any stale comments about OMAP1 are also cleaned up
Signed-off-by: Suman Anna <s-anna@ti.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2014-10-23 05:22:27 +07:00
|
|
|
#ifdef PREFETCH_IOTLB
|
|
|
|
/*
 * iotlb_alloc_cr - allocate and fill a cr_regs pair for TLB entry @e.
 * Returns NULL for a NULL @e, ERR_PTR(-EINVAL) for a misaligned da,
 * ERR_PTR(-ENOMEM) on allocation failure. Caller owns (and frees)
 * the returned buffer.
 */
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	/* da must be aligned to the page size of the entry */
	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	/* pack the entry attributes into the CAM/RAM register layouts */
	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}
|
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EBUSY when the TLB
 * is full. Preserved (e->prsvd) entries are loaded at the lock base and
 * grow the base; non-preserved entries are loaded at the first invalid
 * slot found, or at the current victim.
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		/* find the first invalid entry; iteration moves the victim */
		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		/* re-read the lock: victim now points at the free slot */
		iotlb_lock_get(obj, &l);
	} else {
		/* preserved entries always go at the lock base */
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}
|
|
|
|
|
2011-08-16 19:19:10 +07:00
|
|
|
#else /* !PREFETCH_IOTLB */
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/* no-op stub when TLB prefetching (PREFETCH_IOTLB) is disabled */
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}
|
|
|
|
|
|
|
|
#endif /* !PREFETCH_IOTLB */
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * prefetch_iotlb_entry - preload a TLB entry for @e.
 * Thin wrapper around load_iotlb_entry(); a no-op unless
 * PREFETCH_IOTLB is defined.
 */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
|
2009-01-26 20:13:40 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* flush_iotlb_page - Clear an iommu tlb entry
|
|
|
|
* @obj: target iommu
|
|
|
|
* @da: iommu device virtual address
|
|
|
|
*
|
|
|
|
* Clear an iommu tlb entry which includes 'da' address.
|
|
|
|
**/
|
2011-08-18 02:57:56 +07:00
|
|
|
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
|
|
|
int i;
|
2010-04-27 12:37:12 +07:00
|
|
|
struct cr_regs cr;
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2012-11-20 08:05:51 +07:00
|
|
|
pm_runtime_get_sync(obj->dev);
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2010-04-27 12:37:12 +07:00
|
|
|
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
|
2009-01-26 20:13:40 +07:00
|
|
|
u32 start;
|
|
|
|
size_t bytes;
|
|
|
|
|
|
|
|
if (!iotlb_cr_valid(&cr))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
start = iotlb_cr_to_virt(&cr);
|
|
|
|
bytes = iopgsz_to_bytes(cr.cam & 3);
|
|
|
|
|
|
|
|
if ((start <= da) && (da < start + bytes)) {
|
|
|
|
dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
|
|
|
|
__func__, start, da, bytes);
|
2010-08-20 20:50:18 +07:00
|
|
|
iotlb_load_cr(obj, &cr);
|
2009-01-26 20:13:40 +07:00
|
|
|
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
|
2014-03-08 05:47:03 +07:00
|
|
|
break;
|
2009-01-26 20:13:40 +07:00
|
|
|
}
|
|
|
|
}
|
2012-11-20 08:05:51 +07:00
|
|
|
pm_runtime_put_sync(obj->dev);
|
2009-01-26 20:13:40 +07:00
|
|
|
|
|
|
|
if (i == obj->nr_tlb_entries)
|
|
|
|
dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 *
 * Resets the lock register (base = vict = 0) and issues a global
 * TLB flush, under a runtime-PM reference.
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	/* drop any preserved-entry locking before the global flush */
	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}
|
2010-05-24 09:01:51 +07:00
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
/*
|
|
|
|
* H/W pagetable operations
|
|
|
|
*/
|
2017-07-29 03:49:14 +07:00
|
|
|
/*
 * flush_iopte_range - make @num_entries page-table entries at
 * @dma + @offset visible to the IOMMU by syncing the CPU-written
 * range for device access.
 */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	dma_sync_single_range_for_device(dev, dma, offset,
					 num_entries * sizeof(u32),
					 DMA_TO_DEVICE);
}
|
|
|
|
|
2017-07-29 03:49:14 +07:00
|
|
|
/*
 * iopte_free - release an L2 page table.
 * @dma_valid: true when the table was DMA-mapped (unmap before freeing);
 * false for tables that never reached the hardware.
 */
static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte) {
		if (dma_valid) {
			/* dma addr == phys addr for L2 tables (see iopte_alloc) */
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}
|
|
|
|
|
2017-07-29 03:49:14 +07:00
|
|
|
/*
 * iopte_alloc - get (allocating if needed) the L2 table for @da.
 * @iopgd:  L1 descriptor slot for @da
 * @pt_dma: out: dma address of the L2 table
 *
 * Called with obj->page_table_lock held; the lock is dropped around the
 * allocation, so a concurrent allocator may win the race — the loser's
 * table is freed. Returns the pte pointer for @da, or ERR_PTR(-ENOMEM).
 */
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/* publish the new L2 table in the L1 descriptor */
		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iopgd_alloc_section - install a 1MB section mapping (L1 only).
 * Both @da and @pa must be IOSECTION_SIZE aligned; returns -EINVAL
 * otherwise, 0 on success.
 */
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iopgd_alloc_super - install a 16MB supersection mapping.
 * Supersections occupy 16 consecutive identical L1 descriptors.
 * Both @da and @pa must be IOSUPER_SIZE aligned; returns -EINVAL
 * otherwise, 0 on success.
 */
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	/* replicate the descriptor across all 16 L1 slots */
	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iopte_alloc_page - install a 4KB small-page mapping (L2).
 * Allocates the L2 table via iopte_alloc() if needed; returns 0 on
 * success or a negative errno from the table allocation.
 */
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iopte_alloc_large - install a 64KB large-page mapping (L2).
 * Large pages occupy 16 consecutive identical L2 descriptors.
 * Both @da and @pa must be IOLARGE_SIZE aligned.
 */
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	/* replicate across all 16 L2 slots */
	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iopgtable_store_entry_core - write a page-table entry for @e.
 * Dispatches to the section/supersection/large/small installer based
 * on e->pgsz, under obj->page_table_lock. Returns 0 on success,
 * -EINVAL for bad arguments or an unknown page size.
 */
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
|
|
|
|
|
|
|
|
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 *
 * Flushes any stale TLB entry for e->da, writes the page-table entry,
 * and on success preloads it into the TLB (when PREFETCH_IOTLB).
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iopgtable_lookup_entry - Lookup an iommu pte entry
|
|
|
|
* @obj: target iommu
|
|
|
|
* @da: iommu device virtual address
|
|
|
|
* @ppgd: iommu pgd entry pointer to be returned
|
|
|
|
* @ppte: iommu pte entry pointer to be returned
|
|
|
|
**/
|
2011-08-16 18:58:14 +07:00
|
|
|
static void
|
|
|
|
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
|
|
|
u32 *iopgd, *iopte = NULL;
|
|
|
|
|
|
|
|
iopgd = iopgd_offset(obj, da);
|
|
|
|
if (!*iopgd)
|
|
|
|
goto out;
|
|
|
|
|
2010-05-13 13:45:35 +07:00
|
|
|
if (iopgd_is_table(*iopgd))
|
2009-01-26 20:13:40 +07:00
|
|
|
iopte = iopte_offset(iopgd, da);
|
|
|
|
out:
|
|
|
|
*ppgd = iopgd;
|
|
|
|
*ppte = iopte;
|
|
|
|
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iopgtable_clear_entry_core - tear down the mapping covering @da.
 * Handles small/large L2 entries and section/supersection L1 entries
 * (large/super mappings clear all 16 replicated slots). Frees an L2
 * table that becomes empty. Returns the number of bytes unmapped
 * (0 if nothing was mapped). Caller holds obj->page_table_lock.
 */
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		/* table is empty: free it and fall through to clear the pgd */
		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iopgtable_clear_entry - Remove an iommu pte entry
|
|
|
|
* @obj: target iommu
|
|
|
|
* @da: iommu device virtual address
|
|
|
|
**/
|
2011-08-18 02:57:56 +07:00
|
|
|
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
|
2009-01-26 20:13:40 +07:00
|
|
|
{
|
|
|
|
size_t bytes;
|
|
|
|
|
|
|
|
spin_lock(&obj->page_table_lock);
|
|
|
|
|
|
|
|
bytes = iopgtable_clear_entry_core(obj, da);
|
|
|
|
flush_iotlb_page(obj, da);
|
|
|
|
|
|
|
|
spin_unlock(&obj->page_table_lock);
|
|
|
|
|
|
|
|
return bytes;
|
|
|
|
}
|
|
|
|
|
2011-08-18 02:57:56 +07:00
|
|
|
/*
 * iopgtable_clear_entry_all - wipe the whole page table.
 * Frees every L2 table, zeroes every L1 descriptor (syncing each for
 * the device), then flushes the entire TLB.
 */
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Device IOMMU generic operations
|
|
|
|
*/
|
|
|
|
static irqreturn_t iommu_fault_handler(int irq, void *data)
|
|
|
|
{
|
2011-02-17 02:35:51 +07:00
|
|
|
u32 da, errs;
|
2009-01-26 20:13:40 +07:00
|
|
|
u32 *iopgd, *iopte;
|
2011-08-18 02:57:56 +07:00
|
|
|
struct omap_iommu *obj = data;
|
2011-09-14 02:26:29 +07:00
|
|
|
struct iommu_domain *domain = obj->domain;
|
2015-03-26 19:43:09 +07:00
|
|
|
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
|
2009-01-26 20:13:40 +07:00
|
|
|
|
2017-09-06 05:56:17 +07:00
|
|
|
if (!omap_domain->dev)
|
2009-01-26 20:13:40 +07:00
|
|
|
return IRQ_NONE;
|
|
|
|
|
2011-02-17 02:35:51 +07:00
|
|
|
errs = iommu_report_fault(obj, &da);
|
2011-05-10 21:56:46 +07:00
|
|
|
if (errs == 0)
|
|
|
|
return IRQ_HANDLED;
|
2011-02-17 02:35:51 +07:00
|
|
|
|
|
|
|
/* Fault callback or TLB/PTE Dynamic loading */
|
2011-09-14 02:26:29 +07:00
|
|
|
if (!report_iommu_fault(domain, obj->dev, da, 0))
|
2009-01-26 20:13:40 +07:00
|
|
|
return IRQ_HANDLED;
|
|
|
|
|
2017-07-29 03:49:13 +07:00
|
|
|
iommu_write_reg(obj, 0, MMU_IRQENABLE);
|
2010-05-24 09:01:52 +07:00
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
iopgd = iopgd_offset(obj, da);
|
|
|
|
|
2010-05-13 13:45:35 +07:00
|
|
|
if (!iopgd_is_table(*iopgd)) {
|
2013-05-31 06:10:59 +07:00
|
|
|
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
|
2015-07-21 05:33:32 +07:00
|
|
|
obj->name, errs, da, iopgd, *iopgd);
|
2009-01-26 20:13:40 +07:00
|
|
|
return IRQ_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
iopte = iopte_offset(iopgd, da);
|
|
|
|
|
2013-05-31 06:10:59 +07:00
|
|
|
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
|
2015-07-21 05:33:32 +07:00
|
|
|
obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
|
2009-01-26 20:13:40 +07:00
|
|
|
|
|
|
|
return IRQ_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj: target omap iommu device
 * @iopgd: page table
 *
 * DMA-maps the L1 page table, enables the IOMMU, and flushes the TLB,
 * under obj->iommu_lock. Returns 0 on success or a negative errno.
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		/* NOTE(review): pd_dma stays mapped on this path — verify
		 * whether callers unwind it or it is intentionally kept */
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}
|
|
|
|
|
|
|
|
/**
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 *
 * Unmaps the L1 page table, clears the table pointers and disables
 * the IOMMU, under obj->iommu_lock. Safe to call with a NULL or
 * ERR_PTR @obj (no-op).
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
|
|
|
|
|
2019-08-07 15:26:48 +07:00
|
|
|
/*
 * omap_iommu_save_tlb_entries - save locked TLB entries across suspend.
 * The lock base gives the count of locked/preserved entries; they are
 * copied into obj->cr_ctx for omap_iommu_restore_tlb_entries().
 */
static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}
|
|
|
|
|
|
|
|
/*
 * omap_iommu_restore_tlb_entries - reload TLB entries saved by
 * omap_iommu_save_tlb_entries(), then restore the lock base/victim
 * so the reloaded entries stay preserved.
 */
static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock l;
	struct cr_regs *tmp;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	/* lock the restored entries in place */
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}
|
|
|
|
|
2019-08-07 15:26:50 +07:00
|
|
|
/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend
 * the IOMMUs they control at runtime, after they are idled and
 * suspended all activity. System Suspend will leverage the PM
 * driver late callbacks.
 **/
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	/* nothing attached yet */
	if (!omap_domain->dev)
		return 0;

	/* drop PM references in reverse of the activation order */
	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
|
|
|
|
|
|
|
|
/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System Resume will leverage the PM driver late callbacks.
 **/
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	/* nothing attached yet */
	if (!omap_domain->dev)
		return 0;

	/* take a PM reference on each IOMMU in the domain, in order */
	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
|
|
|
|
|
2019-08-07 15:26:47 +07:00
|
|
|
/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 **/
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	/* pdata hooks are optional legacy callbacks; each is guarded */
	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		/* best-effort: a failure here is only warned about */
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
 **/
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	/* resume sequence is the mirror of the suspend path */
	if (pdata && pdata->set_pwrdm_constraint) {
		/* best-effort: a failure here is only warned about */
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		/* a stuck reset line is fatal for the resume */
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}
|
|
|
|
|
2019-08-07 15:26:49 +07:00
|
|
|
/**
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev: iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * results in the PM core to skip invoking any of the Sleep PM callbacks
 * (suspend, suspend_late, resume, resume_early etc).
 */
static int omap_iommu_prepare(struct device *dev)
{
	return pm_runtime_status_suspended(dev) ? 1 : 0;
}
|
|
|
|
|
2017-09-06 05:56:18 +07:00
|
|
|
static bool omap_iommu_can_register(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct device_node *np = pdev->dev.of_node;
|
|
|
|
|
|
|
|
if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* restrict IOMMU core registration only for processor-port MDMA MMUs
|
|
|
|
* on DRA7 DSPs
|
|
|
|
*/
|
|
|
|
if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
|
|
|
|
(!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-10-03 06:02:44 +07:00
|
|
|
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
|
|
|
|
struct omap_iommu *obj)
|
|
|
|
{
|
|
|
|
struct device_node *np = pdev->dev.of_node;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
|
|
|
|
dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
obj->syscfg =
|
|
|
|
syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
|
|
|
|
if (IS_ERR(obj->syscfg)) {
|
|
|
|
/* can fail with -EPROBE_DEFER */
|
|
|
|
ret = PTR_ERR(obj->syscfg);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
|
|
|
|
&obj->id)) {
|
|
|
|
dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (obj->id != 0 && obj->id != 1) {
|
|
|
|
dev_err(&pdev->dev, "invalid IOMMU instance id\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;
	struct orphan_dev *orphan_dev, *tmp;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	/* extra MMU_REG_SIZE bytes hold the register context (obj->ctx) */
	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	/* TLB size defaults to 32; DT may override, only 8 or 32 are valid */
	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	/* register-context area lives right after the object itself */
	obj->ctx = (void *)obj + sizeof(*obj);
	/* per-TLB-entry save area used across suspend/resume */
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
				   GFP_KERNEL);
	if (!obj->cr_ctx)
		return -ENOMEM;

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	/* can return -EPROBE_DEFER on DRA7 DSP MMUs */
	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	/* some DRA7 DSP MMU instances are deliberately not registered */
	if (omap_iommu_can_register(pdev)) {
		obj->group = iommu_group_alloc();
		if (IS_ERR(obj->group))
			return PTR_ERR(obj->group);

		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			goto out_group;

		iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);

		err = iommu_device_register(&obj->iommu);
		if (err)
			goto out_sysfs;
	}

	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	/*
	 * retry add_device for client devices that probed before this
	 * IOMMU was available; successfully adopted orphans are freed
	 */
	list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
		err = _omap_iommu_add_device(orphan_dev->dev);
		if (!err) {
			list_del(&orphan_dev->node);
			kfree(orphan_dev);
		}
	}

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}
|
|
|
|
|
2012-12-22 06:05:21 +07:00
|
|
|
/* Tear down everything omap_iommu_probe() set up, in reverse order. */
static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	/* group is only set when the instance was registered with the core */
	if (obj->group) {
		iommu_group_put(obj->group);
		obj->group = NULL;

		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}
|
|
|
|
|
2019-08-07 15:26:47 +07:00
|
|
|
/*
 * System sleep is routed through the runtime PM callbacks via
 * pm_runtime_force_suspend/resume in the late phase; .prepare lets the
 * PM core skip the sleep callbacks entirely when the device is already
 * runtime-suspended.
 */
static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};
|
|
|
|
|
2014-09-11 20:37:36 +07:00
|
|
|
/* DT compatibles for the supported OMAP2+ and DRA7 MMU variants */
static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};
|
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
/* platform driver glue; binding is DT-only (see omap_iommu_probe) */
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.pm	= &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};
|
|
|
|
|
2014-03-08 06:44:38 +07:00
|
|
|
/*
 * Fill an iotlb_entry for a little-endian, 8-bit element mapping of
 * @da -> @pa at page size @pgsz; returns the mapping size in bytes.
 */
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	*e = (struct iotlb_entry){
		.da	= da,
		.pa	= pa,
		.valid	= MMU_CAM_V,
		.pgsz	= pgsz,
		.endian	= MMU_RAM_ENDIAN_LITTLE,
		.elsz	= MMU_RAM_ELSZ_8,
		.mixed	= 0,
	};

	return iopgsz_to_bytes(e->pgsz);
}
|
|
|
|
|
2011-06-02 05:46:12 +07:00
|
|
|
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
|
2019-09-08 23:56:38 +07:00
|
|
|
phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
|
2011-06-02 05:46:12 +07:00
|
|
|
{
|
2015-03-26 19:43:09 +07:00
|
|
|
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
|
2017-09-06 05:56:18 +07:00
|
|
|
struct device *dev = omap_domain->dev;
|
|
|
|
struct omap_iommu_device *iommu;
|
|
|
|
struct omap_iommu *oiommu;
|
2011-06-02 05:46:12 +07:00
|
|
|
struct iotlb_entry e;
|
|
|
|
int omap_pgsz;
|
2017-09-06 05:56:18 +07:00
|
|
|
u32 ret = -EINVAL;
|
|
|
|
int i;
|
2011-06-02 05:46:12 +07:00
|
|
|
|
|
|
|
omap_pgsz = bytes_to_iopgsz(bytes);
|
|
|
|
if (omap_pgsz < 0) {
|
|
|
|
dev_err(dev, "invalid size to map: %d\n", bytes);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-01-22 20:42:06 +07:00
|
|
|
dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
|
2011-06-02 05:46:12 +07:00
|
|
|
|
2014-03-08 06:44:38 +07:00
|
|
|
iotlb_init_entry(&e, da, pa, omap_pgsz);
|
2011-06-02 05:46:12 +07:00
|
|
|
|
2017-09-06 05:56:18 +07:00
|
|
|
iommu = omap_domain->iommus;
|
|
|
|
for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
|
|
|
|
oiommu = iommu->iommu_dev;
|
|
|
|
ret = omap_iopgtable_store_entry(oiommu, &e);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
|
|
|
|
ret);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
while (i--) {
|
|
|
|
iommu--;
|
|
|
|
oiommu = iommu->iommu_dev;
|
|
|
|
iopgtable_clear_entry(oiommu, da);
|
|
|
|
}
|
|
|
|
}
|
2011-06-02 05:46:12 +07:00
|
|
|
|
2011-09-03 00:32:31 +07:00
|
|
|
return ret;
|
2011-06-02 05:46:12 +07:00
|
|
|
}
|
|
|
|
|
2011-11-10 16:32:25 +07:00
|
|
|
/*
 * Remove the mapping at @da from every IOMMU of the domain; returns the
 * number of bytes unmapped, or 0 if any instance failed.
 *
 * Fixes: @size is size_t, so the debug format must use %zu (the old %u
 * is undefined behavior on LP64).
 */
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplify return - we are only checking if any of the iommus
	 * reported an error, but not if all of them are unmapping the
	 * same number of entries. This should not occur due to the
	 * mirror programming.
	 */
	return error ? 0 : bytes;
}
|
|
|
|
|
|
|
|
static int omap_iommu_count(struct device *dev)
|
|
|
|
{
|
|
|
|
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
|
|
|
|
int count = 0;
|
|
|
|
|
|
|
|
while (arch_data->iommu_dev) {
|
|
|
|
count++;
|
|
|
|
arch_data++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -EINVAL;

	/* GFP_ATOMIC: called under the domain spinlock in attach_dev */
	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	/* one first-level page table per IOMMU instance */
	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail, but please keep this around to ensure
		 * we keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}
|
|
|
|
|
|
|
|
static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct omap_iommu_device *iommu = odomain->iommus;
|
|
|
|
|
|
|
|
for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
|
|
|
|
kfree(iommu->pgtable);
|
|
|
|
|
|
|
|
kfree(odomain->iommus);
|
|
|
|
odomain->num_iommus = 0;
|
|
|
|
odomain->iommus = NULL;
|
2011-06-02 05:46:12 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Attach a client device to the domain: allocate per-IOMMU state, then
 * attach every IOMMU instance the device references. Any failure rolls
 * back the instances attached so far, then frees the per-IOMMU state.
 */
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* allocates iommus array + page tables; detach_fini cleans up */
	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	/* detach the instances that attached before the failure, in reverse */
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
|
|
|
|
|
2012-04-19 01:09:41 +07:00
|
|
|
/*
 * Detach @dev from the domain; caller must hold omap_domain->lock.
 * Clears all page-table entries, detaches each IOMMU instance in the
 * reverse order of attachment, and frees the per-IOMMU state.
 */
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}
|
2011-06-02 05:46:12 +07:00
|
|
|
|
2012-04-19 01:09:41 +07:00
|
|
|
static void omap_iommu_detach_dev(struct iommu_domain *domain,
|
2015-07-21 05:33:32 +07:00
|
|
|
struct device *dev)
|
2012-04-19 01:09:41 +07:00
|
|
|
{
|
2015-03-26 19:43:09 +07:00
|
|
|
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
|
2012-04-19 01:09:41 +07:00
|
|
|
|
|
|
|
spin_lock(&omap_domain->lock);
|
|
|
|
_omap_iommu_detach_dev(omap_domain, dev);
|
2011-06-02 05:46:12 +07:00
|
|
|
spin_unlock(&omap_domain->lock);
|
|
|
|
}
|
|
|
|
|
2015-03-26 19:43:09 +07:00
|
|
|
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
|
2011-06-02 05:46:12 +07:00
|
|
|
{
|
|
|
|
struct omap_iommu_domain *omap_domain;
|
|
|
|
|
2015-03-26 19:43:09 +07:00
|
|
|
if (type != IOMMU_DOMAIN_UNMANAGED)
|
|
|
|
return NULL;
|
|
|
|
|
2011-06-02 05:46:12 +07:00
|
|
|
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
|
2015-07-21 05:33:29 +07:00
|
|
|
if (!omap_domain)
|
2017-09-06 05:56:18 +07:00
|
|
|
return NULL;
|
2011-06-02 05:46:12 +07:00
|
|
|
|
|
|
|
spin_lock_init(&omap_domain->lock);
|
|
|
|
|
2015-03-26 19:43:09 +07:00
|
|
|
omap_domain->domain.geometry.aperture_start = 0;
|
|
|
|
omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
|
|
|
|
omap_domain->domain.geometry.force_aperture = true;
|
2011-06-02 05:46:12 +07:00
|
|
|
|
2015-03-26 19:43:09 +07:00
|
|
|
return &omap_domain->domain;
|
2011-06-02 05:46:12 +07:00
|
|
|
}
|
|
|
|
|
2015-03-26 19:43:09 +07:00
|
|
|
static void omap_iommu_domain_free(struct iommu_domain *domain)
|
2011-06-02 05:46:12 +07:00
|
|
|
{
|
2015-03-26 19:43:09 +07:00
|
|
|
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
|
2011-06-02 05:46:12 +07:00
|
|
|
|
2012-04-19 01:09:41 +07:00
|
|
|
/*
|
|
|
|
* An iommu device is still attached
|
|
|
|
* (currently, only one device can be attached) ?
|
|
|
|
*/
|
2017-09-06 05:56:17 +07:00
|
|
|
if (omap_domain->dev)
|
2012-04-19 01:09:41 +07:00
|
|
|
_omap_iommu_detach_dev(omap_domain, omap_domain->dev);
|
|
|
|
|
2011-06-02 05:46:12 +07:00
|
|
|
kfree(omap_domain);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Translate a device virtual address @da to a physical address by
 * walking the two-level page table; returns 0 for an invalid entry.
 */
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu = iommu->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	/*
	 * all the iommus within the domain will have identical programming,
	 * so perform the lookup using just the first iommu
	 */
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		/* second-level entry: small (4K) or large (64K) page */
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		/* first-level entry: section (1M) or supersection (16M) */
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
|
|
|
|
|
2019-08-07 15:26:51 +07:00
|
|
|
static int _omap_iommu_add_device(struct device *dev)
|
2014-03-01 03:42:38 +07:00
|
|
|
{
|
2017-09-06 05:56:18 +07:00
|
|
|
struct omap_iommu_arch_data *arch_data, *tmp;
|
2017-04-12 12:21:29 +07:00
|
|
|
struct omap_iommu *oiommu;
|
2017-04-12 12:21:31 +07:00
|
|
|
struct iommu_group *group;
|
2014-03-01 03:42:38 +07:00
|
|
|
struct device_node *np;
|
2014-09-05 05:27:30 +07:00
|
|
|
struct platform_device *pdev;
|
2017-09-06 05:56:18 +07:00
|
|
|
int num_iommus, i;
|
2017-04-12 12:21:30 +07:00
|
|
|
int ret;
|
2019-08-07 15:26:51 +07:00
|
|
|
struct orphan_dev *orphan_dev;
|
|
|
|
unsigned long flags;
|
2014-03-01 03:42:38 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate the archdata iommu structure for DT-based devices.
|
|
|
|
*
|
|
|
|
* TODO: Simplify this when removing non-DT support completely from the
|
|
|
|
* IOMMU users.
|
|
|
|
*/
|
|
|
|
if (!dev->of_node)
|
|
|
|
return 0;
|
|
|
|
|
2017-09-06 05:56:18 +07:00
|
|
|
/*
|
|
|
|
* retrieve the count of IOMMU nodes using phandle size as element size
|
|
|
|
* since #iommu-cells = 0 for OMAP
|
|
|
|
*/
|
|
|
|
num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
|
|
|
|
sizeof(phandle));
|
|
|
|
if (num_iommus < 0)
|
2014-03-01 03:42:38 +07:00
|
|
|
return 0;
|
|
|
|
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-13 04:03:40 +07:00
|
|
|
arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
|
2017-09-06 05:56:18 +07:00
|
|
|
if (!arch_data)
|
|
|
|
return -ENOMEM;
|
2014-09-05 05:27:30 +07:00
|
|
|
|
2017-09-06 05:56:18 +07:00
|
|
|
for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
|
|
|
|
np = of_parse_phandle(dev->of_node, "iommus", i);
|
|
|
|
if (!np) {
|
|
|
|
kfree(arch_data);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
pdev = of_find_device_by_node(np);
|
2019-08-07 15:26:51 +07:00
|
|
|
if (!pdev) {
|
2017-09-06 05:56:18 +07:00
|
|
|
of_node_put(np);
|
|
|
|
kfree(arch_data);
|
2019-08-07 15:26:51 +07:00
|
|
|
spin_lock_irqsave(&orphan_lock, flags);
|
|
|
|
list_for_each_entry(orphan_dev, &orphan_dev_list,
|
|
|
|
node) {
|
|
|
|
if (orphan_dev->dev == dev)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&orphan_lock, flags);
|
|
|
|
|
|
|
|
if (orphan_dev && orphan_dev->dev == dev)
|
|
|
|
return -EPROBE_DEFER;
|
|
|
|
|
|
|
|
orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
|
|
|
|
orphan_dev->dev = dev;
|
|
|
|
spin_lock_irqsave(&orphan_lock, flags);
|
|
|
|
list_add(&orphan_dev->node, &orphan_dev_list);
|
|
|
|
spin_unlock_irqrestore(&orphan_lock, flags);
|
|
|
|
return -EPROBE_DEFER;
|
2017-09-06 05:56:18 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
oiommu = platform_get_drvdata(pdev);
|
|
|
|
if (!oiommu) {
|
|
|
|
of_node_put(np);
|
|
|
|
kfree(arch_data);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp->iommu_dev = oiommu;
|
2019-08-07 15:26:51 +07:00
|
|
|
tmp->dev = &pdev->dev;
|
2017-04-12 12:21:29 +07:00
|
|
|
|
2014-03-01 03:42:38 +07:00
|
|
|
of_node_put(np);
|
|
|
|
}
|
|
|
|
|
2017-09-06 05:56:18 +07:00
|
|
|
/*
|
|
|
|
* use the first IOMMU alone for the sysfs device linking.
|
|
|
|
* TODO: Evaluate if a single iommu_group needs to be
|
|
|
|
* maintained for both IOMMUs
|
|
|
|
*/
|
|
|
|
oiommu = arch_data->iommu_dev;
|
2017-04-12 12:21:30 +07:00
|
|
|
ret = iommu_device_link(&oiommu->iommu, dev);
|
|
|
|
if (ret) {
|
|
|
|
kfree(arch_data);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-03-01 03:42:38 +07:00
|
|
|
dev->archdata.iommu = arch_data;
|
|
|
|
|
2017-04-12 12:21:31 +07:00
|
|
|
/*
|
|
|
|
* IOMMU group initialization calls into omap_iommu_device_group, which
|
|
|
|
* needs a valid dev->archdata.iommu pointer
|
|
|
|
*/
|
|
|
|
group = iommu_group_get_for_dev(dev);
|
|
|
|
if (IS_ERR(group)) {
|
|
|
|
iommu_device_unlink(&oiommu->iommu, dev);
|
|
|
|
dev->archdata.iommu = NULL;
|
|
|
|
kfree(arch_data);
|
|
|
|
return PTR_ERR(group);
|
|
|
|
}
|
|
|
|
iommu_group_put(group);
|
|
|
|
|
2014-03-01 03:42:38 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-08-07 15:26:51 +07:00
|
|
|
static int omap_iommu_add_device(struct device *dev)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = _omap_iommu_add_device(dev);
|
|
|
|
if (ret == -EPROBE_DEFER)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-03-01 03:42:38 +07:00
|
|
|
static void omap_iommu_remove_device(struct device *dev)
|
|
|
|
{
|
|
|
|
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
|
|
|
|
|
|
|
|
if (!dev->of_node || !arch_data)
|
|
|
|
return;
|
|
|
|
|
2017-04-12 12:21:30 +07:00
|
|
|
iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
|
2017-04-12 12:21:31 +07:00
|
|
|
iommu_group_remove_device(dev);
|
2017-04-12 12:21:30 +07:00
|
|
|
|
2017-04-12 12:21:29 +07:00
|
|
|
dev->archdata.iommu = NULL;
|
2014-03-01 03:42:38 +07:00
|
|
|
kfree(arch_data);
|
2017-04-12 12:21:30 +07:00
|
|
|
|
2014-03-01 03:42:38 +07:00
|
|
|
}
|
|
|
|
|
2017-04-12 12:21:31 +07:00
|
|
|
static struct iommu_group *omap_iommu_device_group(struct device *dev)
|
|
|
|
{
|
|
|
|
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
|
2017-06-28 17:50:16 +07:00
|
|
|
struct iommu_group *group = ERR_PTR(-EINVAL);
|
2017-04-12 12:21:31 +07:00
|
|
|
|
|
|
|
if (arch_data->iommu_dev)
|
2018-03-01 18:22:08 +07:00
|
|
|
group = iommu_group_ref_get(arch_data->iommu_dev->group);
|
2017-04-12 12:21:31 +07:00
|
|
|
|
|
|
|
return group;
|
|
|
|
}
|
|
|
|
|
2014-06-27 14:03:12 +07:00
|
|
|
static const struct iommu_ops omap_iommu_ops = {
|
2015-03-26 19:43:09 +07:00
|
|
|
.domain_alloc = omap_iommu_domain_alloc,
|
|
|
|
.domain_free = omap_iommu_domain_free,
|
2011-06-02 05:46:12 +07:00
|
|
|
.attach_dev = omap_iommu_attach_dev,
|
|
|
|
.detach_dev = omap_iommu_detach_dev,
|
|
|
|
.map = omap_iommu_map,
|
|
|
|
.unmap = omap_iommu_unmap,
|
|
|
|
.iova_to_phys = omap_iommu_iova_to_phys,
|
2014-03-01 03:42:38 +07:00
|
|
|
.add_device = omap_iommu_add_device,
|
|
|
|
.remove_device = omap_iommu_remove_device,
|
2017-04-12 12:21:31 +07:00
|
|
|
.device_group = omap_iommu_device_group,
|
2011-11-10 16:32:27 +07:00
|
|
|
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
|
2011-06-02 05:46:12 +07:00
|
|
|
};
|
|
|
|
|
2009-01-26 20:13:40 +07:00
|
|
|
/*
 * Module init: create the L2 pagetable slab cache, set up debugfs,
 * register the platform driver and hook the IOMMU ops onto the
 * platform bus. Bails out early (success) on systems with no
 * matching IOMMU node in the device tree.
 *
 * NOTE(review): the failure paths unwind the driver registration and
 * the slab cache but not omap_iommu_debugfs_init() — confirm whether a
 * debugfs teardown call should be added here.
 */
static int __init omap_iommu_init(void)
{
	struct kmem_cache *cachep;
	const slab_flags_t slab_flags = SLAB_HWCACHE_ALIGN;
	size_t pt_align = 1 << 10;	/* L2 pagetable alignment */
	struct device_node *node;
	int err;

	/* Nothing to do when no OMAP IOMMU is described in the DT. */
	node = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!node)
		return 0;
	of_node_put(node);

	cachep = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, pt_align,
				   slab_flags, NULL);
	if (!cachep)
		return -ENOMEM;
	iopte_cachep = cachep;

	omap_iommu_debugfs_init();

	err = platform_driver_register(&omap_iommu_driver);
	if (err) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	err = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (err)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return err;
}
|
2012-02-26 17:14:14 +07:00
|
|
|
subsys_initcall(omap_iommu_init);
|
2015-07-21 05:33:24 +07:00
|
|
|
/* must be ready before omap3isp is probed */
|