mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-22 21:15:44 +07:00
1764b992be
Provide a way to set the PTE within apply_page_range for discontiguous
objects in addition to the existing method of just incrementing the pfn
for a page range.
Fixes: cc662126b4
("drm/i915: Introduce DRM_I915_GEM_MMAP_OFFSET")
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191231200356.409475-1-chris@chris-wilson.co.uk
142 lines
4.0 KiB
C
/*
|
|
* Copyright © 2014 Intel Corporation
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
* to deal in the Software without restriction, including without limitation
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice (including the next
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
* Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
* IN THE SOFTWARE.
|
|
*
|
|
*/
|
|
|
|
#include <linux/mm.h>
|
|
#include <linux/io-mapping.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include "i915_drv.h"
|
|
|
|
/*
 * Cursor state threaded through apply_to_page_range() while remapping a
 * range of io memory into a user vma, one PTE at a time.
 */
struct remap_pfn {
	struct mm_struct *mm;	/* target address space for set_pte_at() */
	unsigned long pfn;	/* next pfn to insert; doubles as a count of PTEs written */
	pgprot_t prot;		/* page protection bits applied to every PTE */

	struct sgt_iter sgt;	/* scatterlist cursor (discontiguous path only) */
};
|
|
|
|
/*
 * apply_to_page_range() callback for the contiguous case: install one
 * special PTE at @addr pointing at the next pfn of a linearly increasing
 * range, then advance the cursor.
 *
 * Returns 0 (this callback cannot fail).
 */
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}
|
|
|
|
static inline unsigned long sgt_pfn(const struct sgt_iter *sgt)
|
|
{
|
|
return sgt->pfn + (sgt->curr >> PAGE_SHIFT);
|
|
}
|
|
|
|
/*
 * apply_to_page_range() callback for the discontiguous case: install one
 * special PTE at @addr for the page currently addressed by the sg iterator,
 * then step the iterator one page forward, hopping to the next sg entry
 * when the current one is exhausted.
 *
 * Returns 0 on success, -EINVAL if the scatterlist runs out of pages
 * before the requested range is fully populated.
 */
static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* A pfn of zero marks an exhausted (or empty) iterator. */
	if (GEM_WARN_ON(!r->sgt.pfn))
		return -EINVAL;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(&r->sgt), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	/* Advance one page; move to the next sg entry at the boundary. */
	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), false);

	return 0;
}
|
|
|
|
/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Inserts special PTEs for a physically contiguous pfn range starting at
 * @pfn. On failure every PTE inserted so far is zapped before returning.
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size,
		struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	/* Caching mode comes from the io_mapping; all other bits from the vma. */
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		/* r.pfn - pfn == number of PTEs successfully inserted */
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
|
|
|
|
/**
 * remap_io_sg_page - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 *
 * Inserts special PTEs for the (possibly discontiguous) pages described by
 * the scatterlist starting at @sgl. On failure every PTE inserted so far is
 * zapped before returning.
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg_page(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, false),	/* false: iterate by dma=no, i.e. physical pages */
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/*
	 * NOTE(review): flush_cache_range() conventionally takes (vma, start,
	 * end); passing @size as the third argument looks like it should be
	 * addr + size — confirm. Harmless on x86 where this is a no-op.
	 */
	flush_cache_range(vma, addr, size);
	err = apply_to_page_range(r.mm, addr, size, remap_sg_page, &r);
	if (unlikely(err)) {
		/* r.pfn counted insertions (see remap_sg_page), so zap exactly those */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
|