commit b7b3c01b19

In support of device-dax growing the ability to front physically
discontiguous ranges of memory, update devm_memremap_pages() to track
multiple ranges with a single reference counter and devm instance.
Convert all [devm_]memremap_pages() users to specify the number of
ranges they are mapping in their 'struct dev_pagemap' instance.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brice Goglin <Brice.Goglin@inria.fr>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Yan <yanaijie@huawei.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Jia He <justin.he@arm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lkml.kernel.org/r/159643103789.4062302.18426128170217903785.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/160106116293.30709.13350662794915396198.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
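For context, a minimal sketch of the multi-range usage this commit enables. It is illustrative only: the ranges[]/nr_range spelling and the struct_size() sizing follow the series' device-dax conversion, while the function name and the physical addresses are hypothetical. The Xen file below maps a single range via pgmap->range.

#include <linux/err.h>
#include <linux/memremap.h>
#include <linux/numa.h>
#include <linux/slab.h>

/* Sketch: one dev_pagemap fronting two discontiguous physical ranges,
 * sharing a single reference counter. Addresses are hypothetical. */
static void *map_two_ranges_example(void)
{
	struct dev_pagemap *pgmap;

	/* The struct embeds one 'struct range'; size for one extra. */
	pgmap = kzalloc(struct_size(pgmap, ranges, 1), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->nr_range = 2;
	pgmap->ranges[0] = (struct range) {
		.start = 0x100000000ULL,
		.end   = 0x13fffffffULL,
	};
	pgmap->ranges[1] = (struct range) {
		.start = 0x200000000ULL,
		.end   = 0x23fffffffULL,
	};

	/* One memremap_pages() call covers both ranges. */
	return memremap_pages(pgmap, NUMA_NO_NODE);
}
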
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

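/*
 * Pool of unpopulated pages: struct pages whose physical addresses are
 * not backed by RAM, handed out for mapping foreign memory. list_lock
 * protects both the list and list_count.
 */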
static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list);
static unsigned int list_count;

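/*
 * Grow the pool by at least nr_pages. The request is rounded up to a
 * whole memory section since the underlying resource and remap machinery
 * operate on section-aligned ranges; new pages land on page_list.
 */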
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		goto err_pgmap;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

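	/* One contiguous range covering the new resource; nr_range is the
	 * field introduced by the commit described above. */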
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

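	/* memremap_pages() creates the struct pages and mappings for the
	 * range and returns its kernel virtual address (or an ERR_PTR). */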
	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		list_add(&pg->lru, &page_list);
		list_count++;
	}

	return 0;

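/*
 * Unwind labels are named after the step that failed, so each path frees
 * only what was already allocated (note err_pgmap frees 'res', the
 * allocation that preceded the failed pgmap allocation).
 */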
err_memremap:
	release_resource(res);
err_resource:
	kfree(pgmap);
err_pgmap:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = list_first_entry_or_null(&page_list,
							   struct page,
							   lru);

		BUG_ON(!pg);
		list_del(&pg->lru);
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

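				/* Put back every page taken so far, including
				 * the current one whose p2m entry failed. */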
				for (j = 0; j <= i; j++) {
					list_add(&pages[j]->lru, &page_list);
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		list_add(&pages[i]->lru, &page_list);
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

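/*
 * Illustrative usage (not from this file): a backend needing scratch
 * space for mapping foreign memory might do
 *
 *	struct page *pages[16];
 *	int rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *
 *	if (!rc) {
 *		... map grants into the pages ...
 *		xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *	}
 */
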
#ifdef CONFIG_XEN_PV
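/*
 * PV domains can seed the pool at boot: the "extra" memory regions set
 * aside in arch/x86/xen/setup.c have valid PFNs but are deliberately
 * left unpopulated, which is exactly what this allocator hands out.
 */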
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			list_add(&pg->lru, &page_list);
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif