linux_dsm_epyc7002/drivers/gpu/drm/panfrost/panfrost_mmu.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;

	/*
	 * fls returns a value in the range 1 .. 32, so
	 * 10 + fls(num_pages) gives a region_width in the
	 * range 11 .. 42.
	 */
	size = round_up(size, PAGE_SIZE);
	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
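	/*
	 * Worked example (4K pages, added for illustration): size = 2MB
	 * gives num_pages = 512 and fls(512) = 10, so region_width = 20;
	 * 512 == 1ul << (20 - 11) is already a power of two, so no bump.
	 */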

	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}
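
/*
 * Program the AS registers (translation table base and memory
 * attributes) for @mmu and issue an UPDATE command to apply them.
 */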
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
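
/*
 * Pick an address space for @mmu: reuse its current AS when one is
 * still assigned, otherwise grab a free AS or reclaim the
 * least-recently-used AS whose as_count has dropped to zero.
 */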
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);

		WARN_ON(en >= NUM_JOB_SLOTS);

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}
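
/*
 * A 2MB block mapping needs both the IOVA and the physical address 2MB
 * aligned with at least 2MB left to map; mmu_map_sg() passes
 * (iova | paddr) so both alignments are checked in a single call.
 */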
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
			      struct panfrost_mmu *mmu,
			      u64 iova, size_t size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_sync_autosuspend(pfdev->dev);
}

static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(bo->is_mapped))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
	bo->is_mapped = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!bo->is_mapped))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
	bo->is_mapped = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}
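
/*
 * GPU-side TLB maintenance is driven explicitly through
 * AS_COMMAND_FLUSH_PT in panfrost_mmu_flush_range(), so these
 * io-pgtable callbacks can stay (mostly) empty.
 */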
static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk	= mmu_tlb_flush_walk,
	.tlb_flush_leaf	= mmu_tlb_flush_leaf,
};

int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
{
	struct panfrost_mmu *mmu = &priv->mmu;
	struct panfrost_device *pfdev = priv->pfdev;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      priv);
	if (!mmu->pgtbl_ops)
		return -EINVAL;

	return 0;
}

void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
{
	struct panfrost_device *pfdev = priv->pfdev;
	struct panfrost_mmu *mmu = &priv->mmu;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
}
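
/*
 * Find the BO backing a faulting GPU VA: locate the panfrost_mmu bound
 * to @as, then walk that file's drm_mm for a node containing the
 * address, taking a reference on the matching GEM object.
 */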
static struct panfrost_gem_object *
addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_object *bo = NULL;
	struct panfrost_file_priv *priv;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			break;
	}
	if (as != mmu->as)
		goto out;

	priv = container_of(mmu, struct panfrost_file_priv, mmu);

	spin_lock(&priv->mm_lock);

	drm_mm_for_each_node(node, &priv->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			bo = drm_mm_node_to_panfrost_bo(node);
			drm_gem_object_get(&bo->base.base);
			break;
		}
	}

	spin_unlock(&priv->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return bo;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
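
/*
 * Handle a translation fault on a growable (heap) BO: allocate, pin and
 * map the 2MB chunk of pages covering the faulting address so the
 * access can be retried.
 */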
int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
{
	int ret, i;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bo = addr_to_drm_mm_node(pfdev, as, addr);
	if (!bo)
		return -ENOENT;

	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bo->node.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bo->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bo->node.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else
		pages = bo->base.pages;

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
		ret = -EINVAL;
		goto err_map;
	}

	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bo->is_mapped = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

	drm_gem_object_put_unlocked(&bo->base.base);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	drm_gem_object_put_unlocked(&bo->base.base);
	return ret;
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}
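
/*
 * MMU_INT_RAWSTAT appears to carry one page-fault bit per AS in the
 * low half of the word and the matching bus-error bit in the high
 * half, hence each iteration below consumes BIT(i) | BIT(i + 16).
 */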
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int i, ret;

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* Page fault only */
		if ((status & mask) == BIT(i)) {
			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);

			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
			if (!ret) {
				mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
				status &= ~mask;
				continue;
			}
		}

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"Reason: %s\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			"TODO",
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);
		status &= ~mask;
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, "mmu", pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}