Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-30 05:46:39 +07:00
58568d2a82
Fix allocating page cache/slab objects on an unallowed node when memory spread is set, by updating tasks' mems_allowed after their cpuset's mems is changed.

In order to update tasks' mems_allowed in time, we must modify the memory policy code, because the memory policy was originally applied in the process's own context. After this patch, one task directly manipulates another's mems_allowed, and we use alloc_lock in the task_struct to protect the task's mems_allowed and memory policy.

In the fast path, however, we do not take a lock to protect them, because adding a lock could cause a performance regression. Without a lock, a task might see no allowed nodes when its cpuset's mems_allowed is changed to a non-overlapping set. To avoid that, we first set all newly allowed nodes and only then clear the newly disallowed ones.

[lee.schermerhorn@hp.com: The rework of mpol_new() to extract the adjusting of the node mask to apply cpuset and mpol flags "context" breaks set_mempolicy() and mbind() with MPOL_PREFERRED and a NULL nodemask, i.e. explicit local allocation. Fix this by adding the check for MPOL_PREFERRED and an empty node mask to mpol_new_mpolicy(). Remove the now unneeded 'nodes = NULL' from mpol_new(). Note that mpol_new_mempolicy() is always called with a non-NULL 'nodes' parameter now that it has been removed from mpol_new(); therefore, we don't need to test nodes for NULL before testing it for 'empty'. However, just to be extra paranoid, add a VM_BUG_ON() to verify this assumption.]

[lee.schermerhorn@hp.com: I don't think the function name 'mpol_new_mempolicy' is descriptive enough to differentiate it from mpol_new(). This function applies cpuset context, usually constraining nodes to those allowed by the cpuset. However, when the 'RELATIVE_NODES' flag is set, it also translates the nodes. So I settled on 'mpol_set_nodemask()', because the comment block for mpol_new() mentions that we need to call this function to "set nodes". Some additional minor line length, whitespace and typo cleanup.]

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Paul Menage <menage@google.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
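The changelog's "set the new nodes first, then clear the removed ones" ordering is what keeps a lockless fast-path reader from ever observing an empty nodemask. The following is a minimal userspace C sketch of that idea, not the kernel code: it models mems_allowed as a plain 64-bit bitmask, and the type and function names (nodemask_bits, update_mems_allowed) are illustrative assumptions.

/*
 * Minimal userspace sketch, NOT the kernel implementation: a task's
 * mems_allowed is modeled as a 64-bit bitmask with one bit per node.
 * The two-step update mirrors the ordering described in the changelog.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t nodemask_bits;         /* hypothetical stand-in for nodemask_t */

static nodemask_bits mems_allowed;      /* stands in for tsk->mems_allowed */

static void update_mems_allowed(nodemask_bits newmems)
{
        mems_allowed |= newmems;        /* step 1: add every newly allowed node */
        mems_allowed &= newmems;        /* step 2: clear the newly disallowed ones */
}

int main(void)
{
        mems_allowed = 0x3;             /* old cpuset mems: nodes 0-1 */
        update_mems_allowed(0xc);       /* new cpuset mems: nodes 2-3, disjoint */
        printf("mems_allowed = %#llx\n", (unsigned long long)mems_allowed);
        return 0;
}

Between the two steps the mask is the union of the old and new sets; after the second step it is exactly the new set. A reader that takes no lock therefore always sees at least one valid node, even when the old and new sets do not overlap.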
199 lines
4.6 KiB
C
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_locked(struct task_struct *p,
				       struct cpumask *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern void cpuset_lock(void);
extern void cpuset_unlock(void);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

static inline void set_mems_allowed(nodemask_t nodemask)
{
	current->mems_allowed = nodemask;
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					      struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline void cpuset_lock(void) {}
static inline void cpuset_unlock(void) {}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */