commit 8e321fefb0

    The arbitrary restriction on page counts offered by the core
    migrate_page_move_mapping() code results in rather suspicious looking
    fiddling with page reference counts in the aio_migratepage() operation.
    To fix this, make migrate_page_move_mapping() take an extra_count
    parameter that allows aio to tell the code about its own reference
    count on the page being migrated.  While cleaning up aio_migratepage(),
    make it validate that the old page being passed in is actually what
    aio_migratepage() expects to prevent misbehaviour in the case of races.

    Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
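To illustrate what the new parameter buys a caller, here is a minimal
sketch of an address_space_operations.migratepage() callback for a
hypothetical mapping that keeps one private reference on each of its
pages. It is modelled on the pattern the commit describes, not on the
actual aio code; example_migratepage() and the single extra reference
are assumptions.

#include <linux/migrate.h>

/* Hypothetical: the mapping holds exactly one private ref per page. */
static int example_migratepage(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	int rc;

	/*
	 * Declare the one extra reference we hold on 'page' via the
	 * extra_count argument instead of dropping and re-taking it by
	 * hand, which is the fiddling the commit message complains about.
	 */
	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}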
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

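/*
 * Allocation callback handed to migrate_pages(): it returns the target
 * page that the old page should be migrated to; @private carries the
 * caller's cookie and the int ** argument is used to hand a per-page
 * result back to the caller.
 */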
typedef struct page *new_page_t(struct page *, unsigned long private, int **);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * The balloon page migration introduces this special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and in that case we could introduce a nasty
 * page leak if a successfully migrated balloon page gets released
 * concurrently with migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
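
/*
 * Reason a page is being migrated; passed to migrate_pages() and
 * reported through the mm_migrate_pages tracepoint.
 */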
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason);

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else
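/*
 * CONFIG_MIGRATION=n: page migration is compiled out, so the entry
 * points declared above are stubbed out as no-ops or -ENOSYS failures.
 */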

static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
			const nodemask_t *from, const nodemask_t *to,
			unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */
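
/*
 * Automatic NUMA balancing: helpers used by the fault path to migrate a
 * misplaced page toward the node of the task that is accessing it.
 */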
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */
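
/*
 * NUMA balancing of transparent hugepages: migrate a misplaced THP as a
 * single unit, without splitting it into base pages first.
 */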
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */