mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-21 02:36:49 +07:00
e31cf2f4ca
Patch series "mm: consolidate definitions of page table accessors", v2. The low level page table accessors (pXY_index(), pXY_offset()) are duplicated across all architectures and sometimes more than once. For instance, we have 31 definition of pgd_offset() for 25 supported architectures. Most of these definitions are actually identical and typically it boils down to, e.g. static inline unsigned long pmd_index(unsigned long address) { return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); } static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); } These definitions can be shared among 90% of the arches provided XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined. For architectures that really need a custom version there is always possibility to override the generic version with the usual ifdefs magic. These patches introduce include/linux/pgtable.h that replaces include/asm-generic/pgtable.h and add the definitions of the page table accessors to the new header. This patch (of 12): The linux/mm.h header includes <asm/pgtable.h> to allow inlining of the functions involving page table manipulations, e.g. pte_alloc() and pmd_alloc(). So, there is no point to explicitly include <asm/pgtable.h> in the files that include <linux/mm.h>. The include statements in such cases are remove with a simple loop: for f in $(git grep -l "include <linux/mm.h>") ; do sed -i -e '/include <asm\/pgtable.h>/ d' $f done Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Cain <bcain@codeaurora.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Zankel <chris@zankel.net> Cc: "David S. 
Miller" <davem@davemloft.net> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Greg Ungerer <gerg@linux-m68k.org> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Ley Foon Tan <ley.foon.tan@intel.com> Cc: Mark Salter <msalter@redhat.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Simek <monstr@monstr.eu> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nick Hu <nickhu@andestech.com> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Richard Weinberger <richard@nod.at> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vincent Chen <deanbo422@gmail.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Will Deacon <will@kernel.org> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
151 lines
4.3 KiB
C
151 lines
4.3 KiB
C
/*
|
|
* Copyright © 2014 Intel Corporation
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
* to deal in the Software without restriction, including without limitation
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice (including the next
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
* Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
* IN THE SOFTWARE.
|
|
*
|
|
*/
|
|
|
|
#include <linux/mm.h>
|
|
#include <linux/io-mapping.h>
|
|
|
|
|
|
#include "i915_drv.h"
|
|
|
|
/*
 * State threaded through the apply_to_page_range() callbacks below.
 * Tracks the next pfn to insert and, for scatterlist-backed mappings,
 * the current position within the sg table.
 */
struct remap_pfn {
	struct mm_struct *mm;	/* target address space receiving the PTEs */
	unsigned long pfn;	/* next pfn to insert; also counts insertions for unwind */
	pgprot_t prot;		/* protection bits applied to every PTE */

	struct sgt_iter sgt;	/* scatterlist cursor (used by remap_sg() only) */
	resource_size_t iobase;	/* io region base for dma addresses, or -1 to use page pfns */
};
/*
 * apply_to_page_range() callback: install one special PTE for the next
 * pfn in the linear range, then advance to the following pfn.
 */
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *v = data;
	pte_t entry = pte_mkspecial(pfn_pte(v->pfn++, v->prot));

	/* Special PTEs are not associated with any struct page */
	set_pte_at(v->mm, addr, pte, entry);

	return 0;
}
#define use_dma(io) ((io) != -1)
|
|
|
|
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
|
|
{
|
|
if (use_dma(r->iobase))
|
|
return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
|
|
else
|
|
return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
|
|
}
|
|
|
|
/*
 * apply_to_page_range() callback: install one special PTE pointing at the
 * scatterlist iterator's current position, then advance the iterator by a
 * page, hopping to the next sg entry once the current one is exhausted.
 */
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* A zero pfn means the iterator has run past the scatterlist */
	if (GEM_WARN_ON(!r->sgt.pfn))
		return -EINVAL;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	/* Step one page forward; re-seed the iterator at the next sg entry */
	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}
/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 *
 * Returns 0 on success or a negative error code; on failure every PTE
 * installed before the error is zapped again before returning.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	/* Take the caching bits from the io_mapping, everything else from the vma */
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		/* r.pfn - pfn is exactly the number of pages already inserted */
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: Use stored dma address offset by this address or pfn if -1
 *
 * Note: this is only safe if the mm semaphore is held when called.
 *
 * Returns 0 on success or a negative error code; on failure the PTEs
 * inserted so far are zapped again before returning.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* Page-backed (non-dma) mappings may be cached; flush before remapping */
	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* remap_sg() bumped r.pfn once per PTE it installed */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}