mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-27 04:45:23 +07:00
5042db43cc
HMM (heterogeneous memory management) needs struct page to support migration from system main memory to device memory. The reasons for HMM and for migrating to device memory are explained with the HMM core patch. This patch deals with device memory that is un-addressable (i.e. the CPU cannot access it), so we do not want those struct pages to be managed like regular memory. That is why we extend ZONE_DEVICE to support different types of memory.

A persistent memory type is defined for the existing users of ZONE_DEVICE, and a new device un-addressable type is added for un-addressable memory. There is a clear separation between what is expected from each memory type, and existing users of ZONE_DEVICE are unaffected by the new requirements and by the new use of the un-addressable type. All type-specific code paths are protected by a test against the memory type.

Because the memory is un-addressable, we use a new special swap type for when a page is migrated to device memory (this reduces the maximum number of swap files).

Besides the memory type, the two main additions to ZONE_DEVICE are two callbacks. The first, page_free(), is called whenever the page refcount reaches 1 (which means the page is free, as ZONE_DEVICE pages never reach a refcount of 0). This allows the device driver to manage its memory and the associated struct pages. The second callback, page_fault(), happens when the CPU accesses an address that is backed by a device page (which is un-addressable by the CPU). This callback is responsible for migrating the page back to system main memory. The device driver cannot block migration back to system memory; HMM makes sure that such pages cannot be pinned in device memory. If the device is in an error condition and cannot migrate memory back, then a CPU page fault on device memory should end with SIGBUS.

[arnd@arndb.de: fix warning]
Link: http://lkml.kernel.org/r/20170823133213.712917-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/20170817000548.32038-8-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Sherry Cheung <SCheung@nvidia.com>
Cc: Subhash Gutti <sgutti@nvidia.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Bob Liu <liubo95@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
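To make the un-addressable case concrete, here is a minimal sketch (not the kernel's actual migration code) of how the device-private helpers defined in this header fit together: the device page is encoded as a special swap entry that can be stored in a pte, and on a later CPU fault the entry is decoded back to the struct page so it can be migrated to system memory. The two function names below are hypothetical; only the conversions they call come from swapops.h.

/* Illustrative sketch only; the helper names are hypothetical. */
static pte_t example_encode_device_private(struct page *page, bool writable)
{
	/* Type is SWP_DEVICE_WRITE or SWP_DEVICE_READ, offset is the pfn. */
	swp_entry_t entry = make_device_private_entry(page, writable);

	/* Convert to the arch-dependent format so it can live in a page table. */
	return swp_entry_to_pte(entry);
}

static struct page *example_decode_device_private(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);

	/* Only device-private entries map back to a device page. */
	if (!is_device_private_entry(entry))
		return NULL;
	return device_private_entry_to_page(entry);
}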
371 lines
8.8 KiB
C
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}

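/*
 * Worked example (illustrative; assumes a 64-bit swp_entry_t with
 * MAX_SWAPFILES_SHIFT == 5 and RADIX_TREE_EXCEPTIONAL_SHIFT == 2):
 * SWP_TYPE_SHIFT(e) is then 64 - 7 = 57, so swp_entry(2, 0x1234) packs to
 * val == (2UL << 57) | 0x1234, and swp_type()/swp_offset() recover
 * 2 and 0x1234 again.
 */
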
#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

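/*
 * shmem/tmpfs store swap entries in the page-cache radix tree as
 * "exceptional" entries (see the layout comment above): the value is
 * shifted up by RADIX_TREE_EXCEPTIONAL_SHIFT and tagged with
 * RADIX_TREE_EXCEPTIONAL_ENTRY so it cannot be mistaken for a struct
 * page pointer. The two helpers below convert in each direction.
 */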
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}

int device_private_entry_fault(struct vm_area_struct *vma,
			unsigned long addr,
			swp_entry_t entry,
			unsigned int flags,
			pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline int device_private_entry_fault(struct vm_area_struct *vma,
			unsigned long addr,
			swp_entry_t entry,
			unsigned int flags,
			pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */