mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-25 00:50:54 +07:00
memcg: make it possible to use the stock for more than one page
We currently have a percpu stock cache scheme that charges one page at a time from memcg->res, the user counter. When the kernel memory controller comes into play, we'll need to charge more than that. This is because kernel memory allocations will also draw from the user counter, and can be bigger than a single page, as it is the case with the stack (usually 2 pages) or some higher order slabs. [glommer@parallels.com: added a changelog] Signed-off-by: Suleiman Souhlal <suleiman@google.com> Signed-off-by: Glauber Costa <glommer@parallels.com> Acked-by: David Rientjes <rientjes@google.com> Acked-by: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Michal Hocko <mhocko@suse.cz> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Tejun Heo <tj@kernel.org> Cc: Christoph Lameter <cl@linux.com> Cc: Frederic Weisbecker <fweisbec@redhat.com> Cc: Greg Thelen <gthelen@google.com> Cc: JoonSoo Kim <js1304@gmail.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Pekka Enberg <penberg@cs.helsinki.fi> Cc: Rik van Riel <riel@redhat.com> Cc: Suleiman Souhlal <suleiman@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
c2974058a9
commit
a0956d5449
@ -2060,20 +2060,28 @@ struct memcg_stock_pcp {
|
||||
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
|
||||
static DEFINE_MUTEX(percpu_charge_mutex);
|
||||
|
||||
/*
|
||||
* Try to consume stocked charge on this cpu. If success, one page is consumed
|
||||
* from local stock and true is returned. If the stock is 0 or charges from a
|
||||
* cgroup which is not current target, returns false. This stock will be
|
||||
* refilled.
|
||||
/**
|
||||
* consume_stock: Try to consume stocked charge on this cpu.
|
||||
* @memcg: memcg to consume from.
|
||||
* @nr_pages: how many pages to charge.
|
||||
*
|
||||
* The charges will only happen if @memcg matches the current cpu's memcg
|
||||
* stock, and at least @nr_pages are available in that stock. Failure to
|
||||
* service an allocation will refill the stock.
|
||||
*
|
||||
* returns true if successful, false otherwise.
|
||||
*/
|
||||
static bool consume_stock(struct mem_cgroup *memcg)
|
||||
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
|
||||
{
|
||||
struct memcg_stock_pcp *stock;
|
||||
bool ret = true;
|
||||
|
||||
if (nr_pages > CHARGE_BATCH)
|
||||
return false;
|
||||
|
||||
stock = &get_cpu_var(memcg_stock);
|
||||
if (memcg == stock->cached && stock->nr_pages)
|
||||
stock->nr_pages--;
|
||||
if (memcg == stock->cached && stock->nr_pages >= nr_pages)
|
||||
stock->nr_pages -= nr_pages;
|
||||
else /* need to call res_counter_charge */
|
||||
ret = false;
|
||||
put_cpu_var(memcg_stock);
|
||||
@ -2371,7 +2379,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
|
||||
memcg = *ptr;
|
||||
if (mem_cgroup_is_root(memcg))
|
||||
goto done;
|
||||
if (nr_pages == 1 && consume_stock(memcg))
|
||||
if (consume_stock(memcg, nr_pages))
|
||||
goto done;
|
||||
css_get(&memcg->css);
|
||||
} else {
|
||||
@ -2396,7 +2404,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
|
||||
rcu_read_unlock();
|
||||
goto done;
|
||||
}
|
||||
if (nr_pages == 1 && consume_stock(memcg)) {
|
||||
if (consume_stock(memcg, nr_pages)) {
|
||||
/*
|
||||
* It seems dangerous to access memcg without css_get().
|
||||
* But considering how consume_stock works, it's not
|
||||
|
Loading…
Reference in New Issue
Block a user