mm: memcg: push down PageSwapCache check into uncharge entry functions

Not all uncharge paths need to check if the page is swapcache; some of
them can know for sure.

Push the check down into all callsites of uncharge_common() so that the
follow-up patch that removes some of these checks is more obvious.
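
All the hunks below apply the same pattern: the swapcache check that used to
live inside the common uncharge helper is duplicated at each entry point, and
the helper itself only asserts the invariant. As a rough illustration, here is
a minimal user-space sketch of that pattern, assuming made-up stand-in names
(page_t, uncharge_common, uncharge_anon, uncharge_cache); it is not the actual
memcg code.

/*
 * Sketch of the refactoring pattern only, with hypothetical names:
 * a precondition check is pushed from a shared helper into each caller,
 * and the helper keeps just an assertion.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
        bool swapcache;         /* stand-in for PageSwapCache(page) */
        long charge;
} page_t;

static void uncharge_common(page_t *page)
{
        /* was: if (page->swapcache) return;  -- now every caller filters */
        assert(!page->swapcache);
        page->charge = 0;
}

static void uncharge_anon(page_t *page)
{
        if (page->swapcache)    /* check pushed down into this caller */
                return;
        uncharge_common(page);
}

static void uncharge_cache(page_t *page)
{
        if (page->swapcache)    /* same check, duplicated per call site */
                return;
        uncharge_common(page);
}

int main(void)
{
        page_t anon = { .swapcache = false, .charge = 1 };
        page_t swap = { .swapcache = true,  .charge = 1 };

        uncharge_anon(&anon);   /* uncharged: not in swapcache */
        uncharge_anon(&swap);   /* skipped: caller sees swapcache */
        uncharge_cache(&anon);

        printf("anon charge=%ld, swap charge=%ld\n", anon.charge, swap.charge);
        return 0;
}

Turning the helper's silent early return into an assertion (VM_BUG_ON in the
real patch) means a caller that forgets the check now fails loudly instead of
quietly skipping the uncharge.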

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wanpeng Li <liwp.linux@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 0c59b89c81
parent 5d84c7766e
Author:       Johannes Weiner, 2012-07-31 16:45:31 -07:00
Committed by: Linus Torvalds


@@ -2987,8 +2987,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
         if (mem_cgroup_disabled())
                 return NULL;
-        if (PageSwapCache(page))
-                return NULL;
+        VM_BUG_ON(PageSwapCache(page));
         if (PageTransHuge(page)) {
                 nr_pages <<= compound_order(page);
@@ -3085,6 +3084,8 @@ void mem_cgroup_uncharge_page(struct page *page)
         if (page_mapped(page))
                 return;
         VM_BUG_ON(page->mapping && !PageAnon(page));
+        if (PageSwapCache(page))
+                return;
         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
 }
@@ -3092,6 +3093,8 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 {
         VM_BUG_ON(page_mapped(page));
         VM_BUG_ON(page->mapping);
+        if (PageSwapCache(page))
+                return;
         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
 }
@@ -3156,6 +3159,8 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
         if (!swapout) /* this was a swap cache but the swap is unused ! */
                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
+        if (PageSwapCache(page))
+                return;
         memcg = __mem_cgroup_uncharge_common(page, ctype, false);
         /*
@@ -3345,10 +3350,11 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                 unused = oldpage;
         }
         anon = PageAnon(used);
-        __mem_cgroup_uncharge_common(unused,
-                                     anon ? MEM_CGROUP_CHARGE_TYPE_ANON
-                                     : MEM_CGROUP_CHARGE_TYPE_CACHE,
-                                     true);
+        if (!PageSwapCache(unused))
+                __mem_cgroup_uncharge_common(unused,
+                                             anon ? MEM_CGROUP_CHARGE_TYPE_ANON
+                                             : MEM_CGROUP_CHARGE_TYPE_CACHE,
+                                             true);
         css_put(&memcg->css);
         /*
          * We disallowed uncharge of pages under migration because mapcount