Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2025-04-14 20:17:34 +07:00
generic: reduce stack pressure in sched_affinity
  * Modify sched_affinity functions to pass cpumask_t variables by reference
    instead of by value.

  * Use new set_cpus_allowed_ptr function.

Depends on:
	[sched-devel]: sched: add new set_cpus_allowed_ptr function

Cc: Paul Jackson <pj@sgi.com>
Cc: Cliff Wickman <cpw@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f9a86fcbbb
commit b53e921ba1
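The changelog's point about stack pressure follows from the size of cpumask_t: it is a fixed-size bitmap of NR_CPUS bits, so every by-value argument or return of a mask copies the whole bitmap through the stack. A minimal user-space sketch of that arithmetic, with the cpumask_t layout approximated and NR_CPUS = 4096 assumed purely for illustration (neither value is taken from this commit):

/*
 * Illustration only (not part of the patch): approximate the kernel's
 * cpumask_t layout to show how large a by-value copy becomes on a
 * big-SMP build.  NR_CPUS = 4096 is an assumed example configuration.
 */
#include <stdio.h>

#define NR_CPUS       4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct {
        unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
} cpumask_t;

int main(void)
{
        /* With NR_CPUS = 4096 this prints 512 bytes on an LP64 machine:
         * the amount copied for every by-value cpumask_t argument or
         * return value. */
        printf("sizeof(cpumask_t) = %zu bytes\n", sizeof(cpumask_t));
        return 0;
}

Passing pointers instead, as this patch does throughout, leaves a single copy of each mask in the caller's frame.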
@@ -251,18 +251,18 @@ struct threshold_attr {
 	ssize_t(*store) (struct threshold_block *, const char *, size_t count);
 };
 
-static cpumask_t affinity_set(unsigned int cpu)
+static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
+			 cpumask_t *newmask)
 {
-	cpumask_t oldmask = current->cpus_allowed;
-	cpumask_t newmask = CPU_MASK_NONE;
-	cpu_set(cpu, newmask);
-	set_cpus_allowed(current, newmask);
-	return oldmask;
+	*oldmask = current->cpus_allowed;
+	cpus_clear(*newmask);
+	cpu_set(cpu, *newmask);
+	set_cpus_allowed_ptr(current, newmask);
 }
 
-static void affinity_restore(cpumask_t oldmask)
+static void affinity_restore(const cpumask_t *oldmask)
 {
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, oldmask);
 }
 
 #define SHOW_FIELDS(name) \
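With affinity_set() and affinity_restore() reworked as above, the mask storage moves into the callers; the remaining mce_amd_64 hunks below all repeat roughly this pattern (condensed from the diff, not a drop-in snippet):

	cpumask_t oldmask, newmask;               /* caller-owned, stays in one frame */

	affinity_set(b->cpu, &oldmask, &newmask); /* migrate to the target CPU */
	threshold_restart_bank(b, 0, 0);          /* do the per-CPU MSR work   */
	affinity_restore(&oldmask);               /* put the old affinity back */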
@@ -277,15 +277,15 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
 				      const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask;
+	cpumask_t oldmask, newmask;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
 	b->interrupt_enable = !!new;
 
-	oldmask = affinity_set(b->cpu);
+	affinity_set(b->cpu, &oldmask, &newmask);
 	threshold_restart_bank(b, 0, 0);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 
 	return end - buf;
 }
@@ -294,7 +294,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 				     const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask;
+	cpumask_t oldmask, newmask;
 	u16 old;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
@@ -306,9 +306,9 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 	old = b->threshold_limit;
 	b->threshold_limit = new;
 
-	oldmask = affinity_set(b->cpu);
+	affinity_set(b->cpu, &oldmask, &newmask);
 	threshold_restart_bank(b, 0, old);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 
 	return end - buf;
 }
@@ -316,10 +316,10 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 static ssize_t show_error_count(struct threshold_block *b, char *buf)
 {
 	u32 high, low;
-	cpumask_t oldmask;
-	oldmask = affinity_set(b->cpu);
+	cpumask_t oldmask, newmask;
+	affinity_set(b->cpu, &oldmask, &newmask);
 	rdmsr(b->address, low, high);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 	return sprintf(buf, "%x\n",
 		       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
 }
@@ -327,10 +327,10 @@ static ssize_t show_error_count(struct threshold_block *b, char *buf)
 static ssize_t store_error_count(struct threshold_block *b,
 				 const char *buf, size_t count)
 {
-	cpumask_t oldmask;
-	oldmask = affinity_set(b->cpu);
+	cpumask_t oldmask, newmask;
+	affinity_set(b->cpu, &oldmask, &newmask);
 	threshold_restart_bank(b, 1, 0);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 	return 1;
 }
 
@@ -468,7 +468,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	int i, err = 0;
 	struct threshold_bank *b = NULL;
-	cpumask_t oldmask = CPU_MASK_NONE;
+	cpumask_t oldmask, newmask;
 	char name[32];
 
 	sprintf(name, "threshold_bank%i", bank);
@@ -519,10 +519,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
-	oldmask = affinity_set(cpu);
+	affinity_set(cpu, &oldmask, &newmask);
 	err = allocate_threshold_blocks(cpu, bank, 0,
 					MSR_IA32_MC0_MISC + bank * 4);
-	affinity_restore(oldmask);
+	affinity_restore(&oldmask);
 
 	if (err)
 		goto out_free;
@@ -2034,7 +2034,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -445,7 +445,7 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
 	if (retval)
 		return retval;
 
-	return sched_setaffinity(pid, new_mask);
+	return sched_setaffinity(pid, &new_mask);
 }
 
 asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
@@ -1007,10 +1007,10 @@ void __synchronize_sched(void)
 	if (sched_getaffinity(0, &oldmask) < 0)
 		oldmask = cpu_possible_map;
 	for_each_online_cpu(cpu) {
-		sched_setaffinity(0, cpumask_of_cpu(cpu));
+		sched_setaffinity(0, &cpumask_of_cpu(cpu));
 		schedule();
 	}
-	sched_setaffinity(0, oldmask);
+	sched_setaffinity(0, &oldmask);
 }
 EXPORT_SYMBOL_GPL(__synchronize_sched);
 
@@ -4908,9 +4908,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 	return retval;
 }
 
-long sched_setaffinity(pid_t pid, cpumask_t new_mask)
+long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 {
 	cpumask_t cpus_allowed;
+	cpumask_t new_mask = *in_mask;
 	struct task_struct *p;
 	int retval;
 
@@ -4991,7 +4992,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 	if (retval)
 		return retval;
 
-	return sched_setaffinity(pid, new_mask);
+	return sched_setaffinity(pid, &new_mask);
 }
 
 /*