2008-10-19 10:28:16 +07:00
|
|
|
#ifndef __LINUX_PAGE_CGROUP_H
|
|
|
|
#define __LINUX_PAGE_CGROUP_H
|
|
|
|
|
2011-03-24 06:42:30 +07:00
|
|
|
/*
 * Bit numbers used in page_cgroup->flags.  These are also exported
 * through the bounds-generation mechanism (see __GENERATING_BOUNDS_H
 * below), so keep __NR_PCG_FLAGS last.
 */
enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	__NR_PCG_FLAGS,	/* number of flag bits; must stay last */
};
|
|
|
|
|
|
|
|
#ifndef __GENERATING_BOUNDS_H
|
|
|
|
#include <generated/bounds.h>
|
|
|
|
|
2012-08-01 06:43:02 +07:00
|
|
|
#ifdef CONFIG_MEMCG
|
2008-10-19 10:28:16 +07:00
|
|
|
#include <linux/bit_spinlock.h>
|
2011-03-24 06:42:30 +07:00
|
|
|
|
2008-10-19 10:28:16 +07:00
|
|
|
/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup is associated with every page descriptor, and records
 * which mem_cgroup the page belongs to.
 * All page cgroups are allocated at boot or at memory hotplug, so the
 * page_cgroup for a valid pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;		/* PCG_* bits, see enum above */
	struct mem_cgroup *mem_cgroup;	/* owning cgroup; guarded by PCG_LOCK */
};
|
|
|
|
|
2008-11-23 00:33:24 +07:00
|
|
|
/* Set up this node's page_cgroup bookkeeping at boot or memory hotplug. */
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
|
2009-06-12 14:33:53 +07:00
|
|
|
|
|
|
|
/*
 * Pick the initializer matching the memory model: with SPARSEMEM the
 * flatmem initializer is a no-op and page_cgroup_init() does the work;
 * without SPARSEMEM it is the other way around.
 */
#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif
|
|
|
|
|
2008-10-19 10:28:16 +07:00
|
|
|
struct page_cgroup *lookup_page_cgroup(struct page *page);
|
2011-03-24 06:42:30 +07:00
|
|
|
struct page *lookup_cgroup_page(struct page_cgroup *pc);
|
2008-10-19 10:28:16 +07:00
|
|
|
|
|
|
|
/* Generate PageCgroup<uname>(): test flag bit PCG_<lname> in pc->flags. */
#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }
|
|
|
|
|
|
|
|
/* Generate SetPageCgroup<uname>(): atomically set flag bit PCG_<lname>. */
#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags); }
|
|
|
|
|
|
|
|
/* Generate ClearPageCgroup<uname>(): atomically clear flag bit PCG_<lname>. */
#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }
|
|
|
|
|
2009-09-24 05:56:32 +07:00
|
|
|
/* Generate TestClearPageCgroup<uname>(): atomic test-and-clear of PCG_<lname>. */
#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
|
|
|
|
|
2008-10-19 10:28:16 +07:00
|
|
|
/* PCG_USED accessors: PageCgroupUsed()/ClearPageCgroupUsed()/SetPageCgroupUsed(). */
TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)
|
|
|
|
|
memcg: fix mis-accounting of file mapped racy with migration
FILE_MAPPED per memcg of migrated file cache is not properly updated,
because our hook in page_add_file_rmap() can't know to which memcg
FILE_MAPPED should be counted.
Basically, this patch is for fixing the bug but includes some big changes
to fix up other messes.
Now, at migrating mapped file, events happen in following sequence.
1. allocate a new page.
2. get memcg of an old page.
3. charge against a new page before migration. But at this point,
no changes to new page's page_cgroup, no commit for the charge.
(IOW, PCG_USED bit is not set.)
4. page migration replaces radix-tree, old-page and new-page.
5. page migration remaps the new page if the old page was mapped.
6. Here, the new page is unlocked.
7. memcg commits the charge for newpage, Mark the new page's page_cgroup
as PCG_USED.
Because "commit" happens after page-remap, we can count FILE_MAPPED
at "5", because we should avoid to trust page_cgroup->mem_cgroup.
if PCG_USED bit is unset.
(Note: memcg's LRU removal code does that but LRU-isolation logic is used
for helping it. When we overwrite page_cgroup->mem_cgroup, page_cgroup is
not on LRU or page_cgroup->mem_cgroup is NULL.)
We can lose file_mapped accounting information at 5 because FILE_MAPPED
is updated only when mapcount changes 0->1. So we should catch it.
BTW, historically, the above implementation comes from migration-failure
of anonymous page. Because we charge both of old page and new page
with mapcount=0, we can't catch
- the page is really freed before remap.
- migration fails but it's freed before remap
or .....corner cases.
New migration sequence with memcg is:
1. allocate a new page.
2. mark PageCgroupMigration to the old page.
3. charge against a new page onto the old page's memcg. (here, new page's pc
is marked as PageCgroupUsed.)
4. page migration replaces radix-tree, page table, etc...
5. At remapping, new page's page_cgroup is now marked as "USED"
We can catch 0->1 event and FILE_MAPPED will be properly updated.
And we can catch SWAPOUT event after unlock this and freeing this
page by unmap() can be caught.
7. Clear PageCgroupMigration of the old page.
So, FILE_MAPPED will be correctly updated.
Then, for what MIGRATION flag is ?
Without it, at migration failure, we may have to charge old page again
because it may be fully unmapped. "charge" means that we have to dive into
memory reclaim or something complicated. So, it's better to avoid
charge it again. Before this patch, __commit_charge() was working for
both of the old/new page and fixed up all. But this technique has some
racy condition around FILE_MAPPED and SWAPOUT etc...
Now, the kernel use MIGRATION flag and don't uncharge old page until
the end of migration.
I hope this change will make memcg's page migration much simpler. This
page migration has caused several troubles. Worth to add a flag for
simplification.
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Tested-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Reported-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-05-27 04:42:46 +07:00
|
|
|
/* PCG_MIGRATION accessors; the flag marks a page that is under page migration. */
SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)
|
|
|
|
|
2008-10-19 10:28:16 +07:00
|
|
|
/* Take the per-page_cgroup bit spinlock (PCG_LOCK). */
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * This lock is for pc->mem_cgroup, USED, MIGRATION
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}
|
|
|
|
|
|
|
|
/* Release the PCG_LOCK bit spinlock taken by lock_page_cgroup(). */
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
|
|
|
|
|
2012-08-01 06:43:02 +07:00
|
|
|
#else /* CONFIG_MEMCG */
|
2008-10-19 10:28:16 +07:00
|
|
|
struct page_cgroup;
|
|
|
|
|
2008-11-23 00:33:24 +07:00
|
|
|
/* !CONFIG_MEMCG: no page_cgroup tracking, nothing to initialize per node. */
static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}
|
|
|
|
|
|
|
|
/* !CONFIG_MEMCG stub: no page ever has a page_cgroup. */
static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}
|
2008-10-23 04:15:05 +07:00
|
|
|
|
|
|
|
/* !CONFIG_MEMCG stub: boot-time page_cgroup init is a no-op. */
static inline void page_cgroup_init(void)
{
}
|
|
|
|
|
2009-06-12 14:33:53 +07:00
|
|
|
/* !CONFIG_MEMCG stub: flatmem page_cgroup init is a no-op. */
static inline void __init page_cgroup_init_flatmem(void)
{
}
|
|
|
|
|
2012-08-01 06:43:02 +07:00
|
|
|
#endif /* CONFIG_MEMCG */
|
2009-01-08 09:07:58 +07:00
|
|
|
|
|
|
|
#include <linux/swap.h>
|
2009-09-20 17:50:44 +07:00
|
|
|
|
2012-08-01 06:43:02 +07:00
|
|
|
#ifdef CONFIG_MEMCG_SWAP
|
2010-03-11 06:22:17 +07:00
|
|
|
/*
 * Track the mem_cgroup id owning each swapped-out page, keyed by swap
 * entry.  Implemented in mm/page_cgroup.c.
 */
/* Atomically replace the id for @ent if it currently equals @old. */
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
/* Record @id for @ent; presumably returns the previous id — verify in mm/page_cgroup.c. */
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
/* Read back the id recorded for @ent. */
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
/* Allocate/free the per-swap-device id array at swapon/swapoff time. */
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
|
|
|
|
#else
|
|
|
|
|
|
|
|
/* !CONFIG_MEMCG_SWAP stub: nothing is recorded; report previous id 0. */
static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* !CONFIG_MEMCG_SWAP stub: no id is ever recorded, so always report 0. */
static inline
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* !CONFIG_MEMCG_SWAP stub: no per-swap-device state to set up; always succeeds. */
static inline int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* !CONFIG_MEMCG_SWAP stub: nothing to tear down at swapoff. */
static inline void swap_cgroup_swapoff(int type)
{
}
|
|
|
|
|
2012-08-01 06:43:02 +07:00
|
|
|
#endif /* CONFIG_MEMCG_SWAP */
|
2011-03-24 06:42:30 +07:00
|
|
|
|
|
|
|
#endif /* !__GENERATING_BOUNDS_H */
|
|
|
|
|
|
|
|
#endif /* __LINUX_PAGE_CGROUP_H */
|