mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-22 18:32:14 +07:00
5c2c2587b1
get_dev_pagemap() enables paths like get_user_pages() to pin a dynamically mapped pfn-range (devm_memremap_pages()) while the resulting struct page objects are in use. Unlike get_page() it may fail if the device is, or is in the process of being, disabled. While the initial lookup of the range may be an expensive list walk, the result is cached to speed up subsequent lookups, which are likely to be in the same mapped range.

devm_memremap_pages() now requires a reference counter to be specified at init time. For pmem this means moving request_queue allocation into pmem_alloc() so the existing queue usage counter can track "device pages".

ZONE_DEVICE pages always have an elevated count and will never be on an lru reclaim list. That space in 'struct page' can be redirected for other uses, but for safety introduce a poison value that will always trip __list_add() to assert. This allows half of the struct list_head storage to be reclaimed, with some assurance to back up the assumption that the page count never goes to zero and a list_add() is never attempted.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
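The init-time contract above is easier to see in code. Below is a minimal sketch, assuming a hypothetical foo_* driver (the foo_dev structure, foo_ref_release(), and foo_map_pages() are illustrative only; the actual pmem change reuses the request_queue usage counter rather than a private ref), of handing a percpu_ref to devm_memremap_pages():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

/* Hypothetical per-device state; not part of the patch. */
struct foo_dev {
        struct percpu_ref ref;  /* pins the devm_memremap_pages() mapping */
        void *virt_addr;
};

static void foo_ref_release(struct percpu_ref *ref)
{
        /* last reference dropped: the range's "device pages" are unpinned */
}

static int foo_map_pages(struct device *dev, struct foo_dev *foo,
                struct resource *res)
{
        int rc;

        /* the counter must be live before any page in the range can be pinned */
        rc = percpu_ref_init(&foo->ref, foo_ref_release, 0, GFP_KERNEL);
        if (rc)
                return rc;

        foo->virt_addr = devm_memremap_pages(dev, res, &foo->ref, NULL);
        if (IS_ERR(foo->virt_addr)) {
                percpu_ref_exit(&foo->ref);
                return PTR_ERR(foo->virt_addr);
        }

        return 0;
}

On teardown the driver kills and drains the ref; once it is no longer live, get_dev_pagemap() on pfns in the range fails, which is the "may fail if the device is, or is in the process of being, disabled" behavior described above.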
115 lines
3.2 KiB
C
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
        const unsigned long base_pfn;
        const unsigned long reserve;
        unsigned long free;
        unsigned long align;
        unsigned long alloc;
};

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);

#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE)
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
        return NULL;
}
#endif

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @dev: host device of the mapping for debug
 */
struct dev_pagemap {
        struct vmem_altmap *altmap;
        const struct resource *res;
        struct percpu_ref *ref;
        struct device *dev;
};

#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap);
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
#else
static inline void *devm_memremap_pages(struct device *dev,
                struct resource *res, struct percpu_ref *ref,
                struct vmem_altmap *altmap)
{
        /*
         * Fail attempts to call devm_memremap_pages() without
         * ZONE_DEVICE support enabled, this requires callers to fall
         * back to plain devm_memremap() based on config
         */
        WARN_ON_ONCE(1);
        return ERR_PTR(-ENXIO);
}

static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
        return NULL;
}
#endif

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to lookup page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
 * same mapping.
 */
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        const struct resource *res = pgmap ? pgmap->res : NULL;
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference so
         * we can simply do a blind increment
         */
        if (res && phys >= res->start && phys <= res->end) {
                percpu_ref_get(pgmap->ref);
                return pgmap;
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = find_dev_pagemap(phys);
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
        if (pgmap)
                percpu_ref_put(pgmap->ref);
}
#endif /* _LINUX_MEMREMAP_H_ */
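To make the caching behavior of get_dev_pagemap() concrete, here is a sketch of a hypothetical get_user_pages()-style caller (walk_device_pfns() and its argument list are illustrative, not from this patch). With this version of the header, each successful call takes one reference; passing the previous pgmap back in lets pfns that land in the same mapping skip the find_dev_pagemap() walk and take the reference with a plain percpu_ref_get():

#include <linux/memremap.h>
#include <linux/mm.h>

static int walk_device_pfns(const unsigned long *pfns, int nr)
{
        struct dev_pagemap *pgmap = NULL;
        int i;

        for (i = 0; i < nr; i++) {
                struct dev_pagemap *prev = pgmap;

                /* hits the cached fast path whenever pfns[i] stays in range */
                pgmap = get_dev_pagemap(pfns[i], prev);
                /* drop the previous pfn's reference now that we have a new one */
                put_dev_pagemap(prev);
                if (!pgmap)
                        return -ENXIO;  /* device is, or is being, disabled */

                /* ... operate on pfn_to_page(pfns[i]) while the ref is held ... */
        }

        put_dev_pagemap(pgmap);
        return 0;
}

A real consumer such as get_user_pages() follows the same pairing rule: every successful get_dev_pagemap() is eventually balanced by a put_dev_pagemap() once the page has been pinned.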