mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
ac39cf8cb8
FILE_MAPPED per memcg of migrated file cache is not properly updated, because our hook in page_add_file_rmap() can't know to which memcg FILE_MAPPED should be counted. Basically, this patch is for fixing the bug but includes some big changes to fix up other messes. Now, at migrating a mapped file, events happen in the following sequence. 1. allocate a new page. 2. get the memcg of the old page. 3. charge against the new page before migration. But at this point, no changes to the new page's page_cgroup, no commit for the charge. (IOW, the PCG_USED bit is not set.) 4. page migration replaces the radix-tree, old-page and new-page. 5. page migration remaps the new page if the old page was mapped. 6. Here, the new page is unlocked. 7. memcg commits the charge for the new page, marking the new page's page_cgroup as PCG_USED. Because "commit" happens after page-remap, we cannot count FILE_MAPPED at "5", because we should avoid trusting page_cgroup->mem_cgroup if the PCG_USED bit is unset. (Note: memcg's LRU removal code does that, but LRU-isolation logic is used for helping it. When we overwrite page_cgroup->mem_cgroup, the page_cgroup is not on the LRU or page_cgroup->mem_cgroup is NULL.) We can lose file_mapped accounting information at 5 because FILE_MAPPED is updated only when mapcount changes 0->1. So we should catch it. BTW, historically, the above implementation comes from migration-failure of anonymous pages. Because we charge both the old page and the new page with mapcount=0, we can't catch - the page is really freed before remap. - migration fails but it's freed before remap or .....corner cases. The new migration sequence with memcg is: 1. allocate a new page. 2. mark PageCgroupMigration on the old page. 3. charge against the new page onto the old page's memcg. (here, the new page's pc is marked as PageCgroupUsed.) 4. page migration replaces the radix-tree, page table, etc... 5. At remapping, the new page's page_cgroup is now marked as "USED". We can catch the 0->1 event and FILE_MAPPED will be properly updated.
And we can catch the SWAPOUT event after unlock; freeing the page by unmap() can also be caught. 7. Clear PageCgroupMigration on the old page. So, FILE_MAPPED will be correctly updated. Then, what is the MIGRATION flag for? Without it, at migration failure, we may have to charge the old page again because it may be fully unmapped. "Charge" means that we have to dive into memory reclaim or something complicated. So, it's better to avoid charging it again. Before this patch, __commit_charge() was working for both the old and the new page and fixed everything up. But this technique had some racy conditions around FILE_MAPPED and SWAPOUT etc... Now, the kernel uses the MIGRATION flag and doesn't uncharge the old page until the end of migration. I hope this change will make memcg's page migration much simpler. This page migration has caused several troubles. Worth adding a flag for simplification. Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Tested-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Reported-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: "Kirill A. Shutemov" <kirill@shutemov.name> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
165 lines
3.9 KiB
C
165 lines
3.9 KiB
C
#ifndef __LINUX_PAGE_CGROUP_H
|
|
#define __LINUX_PAGE_CGROUP_H
|
|
|
|
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
|
|
#include <linux/bit_spinlock.h>
|
|
/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 * All page cgroups are allocated at boot or at a memory hotplug event;
 * the page cgroup for a pfn therefore always exists.
 */
/* Per-page memcg bookkeeping; one of these exists for every page descriptor. */
struct page_cgroup {
	unsigned long flags;		/* PCG_* bit flags, see enum below */
	struct mem_cgroup *mem_cgroup;	/* memcg this page is charged to */
	struct page *page;		/* the page this page_cgroup describes */
	struct list_head lru;		/* per cgroup LRU list */
};
/* Per-node initialization of page_cgroup storage (memory hotplug aware). */
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
/*
 * With sparsemem, page_cgroup_init() (defined out of line) does the boot-time
 * setup and the flatmem hook is a no-op.
 */
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
/*
 * With flatmem it is the other way round: page_cgroup_init_flatmem() is the
 * real (out-of-line) initializer and page_cgroup_init() is a no-op.
 */
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

/* Return the page_cgroup for @page; always exists once init has run. */
struct page_cgroup *lookup_page_cgroup(struct page *page);
/* Bit numbers within page_cgroup->flags; accessors are generated below. */
enum {
	/* flags for mem_cgroup */
	PCG_LOCK,	/* page cgroup is locked */
	PCG_CACHE,	/* charged as cache */
	PCG_USED,	/* this object is in use. */
	PCG_ACCT_LRU,	/* page has been accounted for */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	PCG_MIGRATION,	/* under page migration */
};
/*
 * Templates generating static inline test/set/clear/test-and-clear helpers
 * for one PCG_* flag bit each, named PageCgroup<uname>() and friends.
 */
#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
/* Lock bit is only tested here; setting/clearing goes through
 * lock_page_cgroup()/unlock_page_cgroup() below. */
TESTPCGFLAG(Locked, LOCK)

/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)
static inline int page_cgroup_nid(struct page_cgroup *pc)
|
|
{
|
|
return page_to_nid(pc->page);
|
|
}
|
|
|
|
static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
|
|
{
|
|
return page_zonenum(pc->page);
|
|
}
|
|
|
|
/*
 * Take the per-page_cgroup lock, implemented as a bit spinlock on the
 * PCG_LOCK bit of pc->flags (see <linux/bit_spinlock.h>).
 */
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_lock(PCG_LOCK, &pc->flags);
}
/* Release the per-page_cgroup bit spinlock taken by lock_page_cgroup(). */
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
/* Memory cgroups disabled: opaque type plus no-op stubs. */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

/* Without memcg there is no page_cgroup; callers must handle NULL. */
static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif
#include <linux/swap.h>
|
|
|
|
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
|
|
/*
 * Swap-entry -> memcg id bookkeeping (ids are unsigned short).
 * Storage is created per swap type at swapon and freed at swapoff.
 */
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
#else
|
|
|
|
/*
 * CONFIG_CGROUP_MEM_RES_CTLR_SWAP disabled: stubs that report "no cgroup"
 * (id 0) and succeed trivially.
 * NOTE(review): no stub is provided for swap_cgroup_cmpxchg(); presumably it
 * is only called when this config is enabled — verify at call sites.
 */
static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}
#endif
|
|
#endif
|