mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 23:31:02 +07:00
commit 4e598fad22
Local memory objects are similar to our usual scatterlist, but instead of using the struct page stored therein, we need to use the sg->dma_address. Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: Matthew Auld <matthew.auld@intel.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Link: https://patchwork.freedesktop.org/patch/msgid/20200103204137.2131004-1-chris@chris-wilson.co.uk
152 lines
4.3 KiB
C
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/mm.h>
|
|
#include <linux/io-mapping.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include "i915_drv.h"
|
|
|
|
/*
 * State shared with the apply_to_page_range() callbacks (remap_pfn(),
 * remap_sg()) while inserting special PTEs into a userspace vma.
 */
struct remap_pfn {
	struct mm_struct *mm;	/* target mm passed to set_pte_at() */
	unsigned long pfn;	/* next pfn for linear remap; doubles as a
				 * count of PTEs inserted for sg remap, used
				 * to unwind on error */
	pgprot_t prot;		/* protection bits for the new PTEs */

	/* Scatterlist walk state — only used by remap_sg()/remap_io_sg() */
	struct sgt_iter sgt;
	resource_size_t iobase;	/* dma address offset, or -1 to walk pfns */
};
/*
 * apply_to_page_range() callback: install one special PTE for the next pfn
 * of a linear physical range, advancing the cursor in @data.
 */
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *v = data;
	pte_t entry;

	/* Special PTE are not associated with any struct page */
	entry = pte_mkspecial(pfn_pte(v->pfn, v->prot));
	set_pte_at(v->mm, addr, pte, entry);
	v->pfn++;

	return 0;
}
/*
 * An iobase of -1 selects pfn-based iteration (system pages); any other
 * value means the scatterlist's dma_address is used, offset by iobase.
 */
#define use_dma(io) ((io) != -1)
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
|
|
{
|
|
if (use_dma(r->iobase))
|
|
return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
|
|
else
|
|
return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
|
|
}
|
|
|
|
/*
 * apply_to_page_range() callback: install one special PTE per page while
 * walking the scatterlist held in @data.
 *
 * Returns 0, or -EINVAL if the scatterlist is exhausted before the whole
 * requested range has been populated.
 */
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/*
	 * Sanity-check the iterator itself rather than sgt.pfn: pfn is only
	 * filled in for page-based iteration and stays 0 when walking dma
	 * addresses (use_dma(r->iobase)), so testing it would spuriously
	 * reject every DMA-mapped scatterlist.
	 */
	if (GEM_WARN_ON(!r->sgt.sgp))
		return -EINVAL;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	/* Advance one page; roll over to the next sg entry when consumed */
	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}
/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 *
 * Return: 0 on success, or the negative error code from
 * apply_to_page_range(); on failure any PTEs inserted so far are zapped
 * again before returning.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size,
		struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	/* Cache attribute bits come from the io_mapping, the rest from the vma */
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		/* r.pfn - pfn == number of PTEs successfully inserted */
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: Use stored dma address offset by this address or pfn if -1
 *
 * Note: this is only safe if the mm semaphore is held when called.
 *
 * Return: 0 on success, or the negative error code from
 * apply_to_page_range(); on failure any PTEs inserted so far are zapped
 * again before returning.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* Page-backed (non-DMA) mappings may have stale cache lines */
	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* r.pfn counted each PTE inserted by remap_sg() */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}