commit d6d86c0a7f
Sasha Levin reported a KASAN splat inside isolate_migratepages_range(). The problem is in __is_movable_balloon_page(), which tests AS_BALLOON_MAP in page->mapping->flags. This function has no protection against anonymous pages, so it ended up reading address-space flags out of a struct anon_vma.

Further investigation shows more problems in the current implementation:

* The special branch in __unmap_and_move() never works: balloon_page_movable() checks page flags and page_count. In __unmap_and_move() the page is locked and its reference counter is elevated, so balloon_page_movable() always fails and execution takes the normal migration path. virtballoon_migratepage() then returns MIGRATEPAGE_BALLOON_SUCCESS instead of MIGRATEPAGE_SUCCESS; move_to_new_page() treats this as an error code and sets newpage->mapping to NULL. The newly migrated page loses its connection to the balloon and any ability to migrate again.

* lru_lock is erroneously required in isolate_migratepages_range() for isolating a ballooned page. That function releases lru_lock periodically, which makes migration mostly impossible for some pages.

* balloon_page_dequeue has a tight race with balloon_page_isolate: balloon_page_isolate can run in parallel with dequeue, between picking the page off the list and taking the page lock. The race is rare because both sides use trylock_page().

This patch fixes all of them. Instead of a fake mapping with a special flag, it uses a special state of page->_mapcount: PAGE_BALLOON_MAPCOUNT_VALUE = -256. The buddy allocator uses PAGE_BUDDY_MAPCOUNT_VALUE = -128 for a similar purpose. Storing the mark directly in struct page makes everything safer and easier.

PagePrivate is used to mark pages that are present in the page list (i.e. not isolated), much like PageLRU for normal pages. It replaces the special reference-counting rules and makes balloon migration similar to migration of normal pages. This flag is protected by the page lock together with the link to the balloon device.

Signed-off-by: Konstantin Khlebnikov <k.khlebnikov@samsung.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Link: http://lkml.kernel.org/p/53E6CEAA.9020105@oracle.com
Cc: Rafael Aquini <aquini@redhat.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: <stable@vger.kernel.org> [3.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
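The _mapcount trick works because an unmapped page's _mapcount sits at -1, so a distinct negative value can tag special pages without touching page->mapping at all, exactly as PAGE_BUDDY_MAPCOUNT_VALUE already does for the buddy allocator. A minimal sketch of the marking helpers in the PageBuddy style (an illustration of the scheme described above, not necessarily the exact patch hunk):

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	/* Balloon pages are tagged by a dedicated _mapcount value. */
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	/* Only an unmapped page (_mapcount == -1) may be tagged. */
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}

Because the mark lives in struct page itself, a check like __is_movable_balloon_page() no longer has to dereference page->mapping, which is what blew up on anonymous pages.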
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */
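The return-value convention documented in the header is strict: move_to_new_page() treats anything other than MIGRATEPAGE_SUCCESS as a failure, which is precisely why the old MIGRATEPAGE_BALLOON_SUCCESS special case misfired. A sketch of a minimal migratepage callback built only from helpers this header exports (example_migratepage is an illustrative name, not kernel code):

/* Illustration only: the shape of a minimal migratepage callback. */
static int example_migratepage(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	int rc;

	/* Re-point the mapping's slot from the old page to the new one;
	 * NULL buffer_head and zero extra_count for a plain pagecache page. */
	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;	/* negative errno: the caller aborts migration */

	/* Copy contents and relevant flags, then report success with
	 * MIGRATEPAGE_SUCCESS (0); no other "success" codes exist anymore. */
	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}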