slab/mempolicy: always use local policy from interrupt context
slab_node() could access current->mempolicy from interrupt context. However, there is a race condition during exit where the mempolicy is first freed and then the pointer zeroed.

Using this from interrupts seems bogus anyway. The interrupt will interrupt a random process and therefore get a random mempolicy. Many times, this will be idle's, which no one can change.

Just disable this here and always use the local policy for slab from interrupts.

I also cleaned up the callers of slab_node() a bit, which always passed the same argument. I believe the original mempolicy code did that in fact, so it's likely a regression.

v2: send version with correct logic
v3: simplify. fix typo.

Reported-by: Arun Sharma <asharma@fb.com>
Cc: penberg@kernel.org
Cc: cl@linux.com
Signed-off-by: Andi Kleen <ak@linux.intel.com>
[tdmackey@twitter.com: Rework control flow based on feedback from cl@linux.com, fix logic, and cleanup current task_struct reference]
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: David Mackey <tdmackey@twitter.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
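The window described above, sketched for illustration only. The exit-path shape and the helper name below are assumptions made for this sketch and are not part of this commit:

/* Illustration only: rough shape of the task-exit path the commit message
 * refers to, where the mempolicy is dropped before the pointer is cleared.
 * The helper name is made up for this sketch. */
static void exit_task_mempolicy_sketch(struct task_struct *tsk)
{
	mpol_put(tsk->mempolicy);   /* policy refcount dropped, may be freed */
	tsk->mempolicy = NULL;      /* ... pointer only cleared afterwards   */
}

/* An interrupt arriving between those two statements could previously call
 * slab_node(current->mempolicy) on the exiting task and dereference freed
 * memory.  After this patch, slab_node() takes no argument and returns the
 * local node early when in_interrupt() is true. */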
This commit is contained in:
parent 8c138bc009
commit e7b691b085
include/linux/mempolicy.h
@@ -215,7 +215,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
-extern unsigned slab_node(struct mempolicy *policy);
+extern unsigned slab_node(void);
 
 extern enum zone_type policy_zone;
mm/mempolicy.c
@@ -1602,8 +1602,14 @@ static unsigned interleave_nodes(struct mempolicy *policy)
  * task can change it's policy. The system default policy requires no
  * such protection.
  */
-unsigned slab_node(struct mempolicy *policy)
+unsigned slab_node(void)
 {
+	struct mempolicy *policy;
+
+	if (in_interrupt())
+		return numa_node_id();
+
+	policy = current->mempolicy;
 	if (!policy || policy->flags & MPOL_F_LOCAL)
 		return numa_node_id();
 
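For context, here is roughly how the whole function reads after this change. This is a sketch reconstructed from mainline kernels of that era; the MPOL_PREFERRED, MPOL_INTERLEAVE, and MPOL_BIND branches are not shown in the hunk above and may differ slightly in this tree:

/* Sketch of slab_node() after the patch; everything below the early
 * in_interrupt() check is reconstructed for illustration. */
unsigned slab_node(void)
{
	struct mempolicy *policy;

	/* An interrupt runs in a random task's context, so that task's
	 * mempolicy is meaningless here (and racy against exit); just
	 * allocate on the local node. */
	if (in_interrupt())
		return numa_node_id();

	policy = current->mempolicy;
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/* MPOL_F_LOCAL was handled above */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/* Follow bind policy behavior and start the allocation at
		 * the first node in the policy's nodemask. */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);

		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
					   &policy->v.nodes, &zone);
		return zone ? zone->node : numa_node_id();
	}

	default:
		BUG();
	}
}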
mm/slab.c
@@ -3310,7 +3310,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
-		nid_alloc = slab_node(current->mempolicy);
+		nid_alloc = slab_node();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3342,7 +3342,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 retry_cpuset:
 	cpuset_mems_cookie = get_mems_allowed();
-	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+	zonelist = node_zonelist(slab_node(), flags);
 
 retry:
 	/*
mm/slub.c
@@ -1617,7 +1617,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 	do {
 		cpuset_mems_cookie = get_mems_allowed();
-		zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+		zonelist = node_zonelist(slab_node(), flags);
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 			struct kmem_cache_node *n;