mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-01 19:26:45 +07:00)
62b61f611e ksm: memory hotremove migration only
The previous patch enables page migration of ksm pages, but that soon gets into trouble: not surprising, since we're using the ksm page lock to lock operations on its stable_node, but page migration switches the page whose lock is to be used for that. Another layer of locking would fix it, but do we need that yet?

Do we actually need page migration of ksm pages? Yes, memory hotremove needs to offline sections of memory: and since we stopped allocating ksm pages with GFP_HIGHUSER, they will tend to be GFP_HIGHUSER_MOVABLE candidates for migration.

But KSM is currently unconscious of NUMA issues, happily merging pages from different NUMA nodes: at present the rule must be not to use MADV_MERGEABLE where you care about NUMA. So no, NUMA page migration of ksm pages does not make sense yet.

So, to complete support for ksm swapping, we need to make hotremove safe. Have ksm_memory_callback() take ksm_thread_mutex on MEM_GOING_OFFLINE and release it on MEM_OFFLINE or MEM_CANCEL_OFFLINE. But if mapped pages are freed before migration reaches them, stable_nodes may be left still pointing to struct pages which have been removed from the system: the stable_node needs to identify a page by pfn rather than by page pointer; then it can safely prune them at MEM_OFFLINE.

And make NUMA migration skip PageKsm pages where it skips PageReserved. But it's only when we reach unmap_and_move() that the page lock is taken and we can be sure that the raised page count has prevented a PageAnon page from being upgraded to PageKsm underneath us: so add an offlining arg to migrate_pages(), to migrate a ksm page when offlining (which has sufficient locking) but reject it otherwise.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
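The locking scheme described above maps onto the kernel's standard memory-hotplug notifier. Below is a minimal sketch of that shape, reconstructed for illustration rather than quoted from this commit's mm/ksm.c; in particular, prune_stable_tree_range() is a hypothetical helper standing in for the pfn-based stable-tree pruning the message describes.

#include <linux/memory.h>
#include <linux/mutex.h>

/*
 * Sketch: hold ksm_thread_mutex across the whole offline transaction,
 * so neither ksmd nor MADV_UNMERGEABLE can race with page migration;
 * at MEM_OFFLINE, prune stable_nodes by pfn before the struct pages
 * behind them disappear.
 */
static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&ksm_thread_mutex);
		break;
	case MEM_OFFLINE:
		/* Hypothetical helper: walk the stable tree and remove
		 * any stable_node whose pfn lies in the offlined range. */
		prune_stable_tree_range(mn->start_pfn,
					mn->start_pfn + mn->nr_pages);
		/* fall through */
	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&ksm_thread_mutex);
		break;
	}
	return NOTIFY_OK;
}

Such a callback would be registered with hotplug_memory_notifier() at KSM init time, at a priority that lets page migration do its work first.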
47 lines · 1.3 KiB · C
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);

#ifdef CONFIG_MIGRATION
#define PAGE_MIGRATION 1

extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *);
extern int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, int offlining);

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_vmas(struct mm_struct *mm,
			const nodemask_t *from, const nodemask_t *to,
			unsigned long flags);
#else
#define PAGE_MIGRATION 0

static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, int offlining)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
			const nodemask_t *from, const nodemask_t *to,
			unsigned long flags)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */
#endif /* _LINUX_MIGRATE_H */
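With the header change above, every migrate_pages() caller now states whether it runs on behalf of memory hotremove: only the offline_pages() path, which holds ksm_thread_mutex via the callback sketched earlier, may pass offlining as 1; other callers pass 0. The gate itself sits in mm/migrate.c:unmap_and_move() once the page lock is held. The following is an illustrative sketch of that test, not a verbatim quote of the commit:

/*
 * In unmap_and_move(), after the page lock has been taken: only now
 * can PageKsm be trusted, since the raised page count plus the page
 * lock prevent KSM from upgrading a PageAnon page to PageKsm beneath
 * us.  Without hotremove's locking, a ksm page must be refused.
 */
if (!offlining && PageKsm(page)) {
	rc = -EBUSY;
	goto unlock;
}

A hotremove caller would thus invoke migrate_pages(&pagelist, new_page_fn, private, 1), where new_page_fn is whatever new_page_t allocator it supplies, while paths such as mempolicy's migrate_to_node() keep passing 0 and so continue to skip ksm pages.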