Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-26 12:50:53 +07:00)
Cputime cleanups on reader side:

 * Improve naming and code location
 * Consolidate adjustment code
 * Comment the adjustment code

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABAgAGBQJQt5oaAAoJEIUkVEdQjox3hNwP/2QP7p9BHCPwGWenIi4aVUWH
tlDLwWvQE919YPYL4AUgz4b9f4G7U7dbBozIJRxhB0rjqrbXU6PDvVCIwVyDH2xQ
mTp5qdqyysgzqgZ7q0t27zLfHEANRcH8Tnrqj2XustqvdYcIzZKZeNkFsF3QRiDw
utIEmE8A9mBnWDP7O4fDmo8onHNUmJc50Y0c/WJW7fbtq5aCh2vn87efV4GYGNjk
e1qZuLRWdZYXkDnO6zqD5tUe/kB0ioPzXXyBkYAHXCMhCpkMDu7c18N+IrY80kBb
vBQqeAGlpUuXnJ/MDFazqqbmezBYhnTIbnojyWO4ONzi2z6L3K9F1/zukM4WtvLv
RNDF4MS7smFjyXXXfliIGOhvI5C5O9bosPOzBtvwHSYrnS5KGL8fv8N8tXixqytW
nX5NEcjfCZXpNpm4TELcDyAvOrVMFe2CQwKgLBPSY1zRch34nJi9G55uKKSjg1xd
Z1aDbVZFNt9R3ozV1rVaptNzagEa/023bvmnB8IiuA9oh6rNZOHhsc/lo1T2VaeO
PhJqD50JPbJyycJ1m0pIW8iVSUxfIvJtICEHgVSCPH5A58PsKFr+8ELs+InTPTDt
11V7dxHAmspar1CO1mqYMMIS4VKgPfwNI6zuaO+JlmU4nMB42y8WAZn/lzMyafQE
Uswa6UTBBiU159HNzgDh
=FRxY
-----END PGP SIGNATURE-----

Merge tag 'cputime-adjustment-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into sched/core

Pull cputime cleanups from Frederic Weisbecker:

 * Improve naming and code location
 * Consolidate adjustment code
 * Comment the adjustment code

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit e783377e93
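For orientation, the reader-side rename that the diff below applies throughout: task_times() and thread_group_times() become task_cputime_adjusted() and thread_group_cputime_adjusted(), with unchanged signatures (see the extern declarations changed in the -1751,8 hunk). A minimal compilable sketch of the before/after, with cputime_t reduced to a stand-in typedef (illustration only, not kernel code):

/*
 * Old name on the left, new name on the right; arguments are the same.
 *
 *   task_times()          -> task_cputime_adjusted()
 *   thread_group_times()  -> thread_group_cputime_adjusted()
 */
typedef unsigned long long cputime_t;   /* stand-in for the kernel type */
struct task_struct;

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);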
@@ -438,7 +438,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                         min_flt += sig->min_flt;
                         maj_flt += sig->maj_flt;
-                        thread_group_times(task, &utime, &stime);
+                        thread_group_cputime_adjusted(task, &utime, &stime);
                         gtime += sig->gtime;
                 }
@@ -454,7 +454,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
         if (!whole) {
                 min_flt = task->min_flt;
                 maj_flt = task->maj_flt;
-                task_times(task, &utime, &stime);
+                task_cputime_adjusted(task, &utime, &stime);
                 gtime = task->gtime;
         }
@@ -1272,81 +1272,6 @@ static const struct file_operations proc_pid_sched_operations = {
 
 #endif
 
-#ifdef CONFIG_SCHED_AUTOGROUP
-/*
- * Print out autogroup related information:
- */
-static int sched_autogroup_show(struct seq_file *m, void *v)
-{
-        struct inode *inode = m->private;
-        struct task_struct *p;
-
-        p = get_proc_task(inode);
-        if (!p)
-                return -ESRCH;
-        proc_sched_autogroup_show_task(p, m);
-
-        put_task_struct(p);
-
-        return 0;
-}
-
-static ssize_t
-sched_autogroup_write(struct file *file, const char __user *buf,
-                      size_t count, loff_t *offset)
-{
-        struct inode *inode = file->f_path.dentry->d_inode;
-        struct task_struct *p;
-        char buffer[PROC_NUMBUF];
-        int nice;
-        int err;
-
-        memset(buffer, 0, sizeof(buffer));
-        if (count > sizeof(buffer) - 1)
-                count = sizeof(buffer) - 1;
-        if (copy_from_user(buffer, buf, count))
-                return -EFAULT;
-
-        err = kstrtoint(strstrip(buffer), 0, &nice);
-        if (err < 0)
-                return err;
-
-        p = get_proc_task(inode);
-        if (!p)
-                return -ESRCH;
-
-        err = proc_sched_autogroup_set_nice(p, nice);
-        if (err)
-                count = err;
-
-        put_task_struct(p);
-
-        return count;
-}
-
-static int sched_autogroup_open(struct inode *inode, struct file *filp)
-{
-        int ret;
-
-        ret = single_open(filp, sched_autogroup_show, NULL);
-        if (!ret) {
-                struct seq_file *m = filp->private_data;
-
-                m->private = inode;
-        }
-        return ret;
-}
-
-static const struct file_operations proc_pid_sched_autogroup_operations = {
-        .open = sched_autogroup_open,
-        .read = seq_read,
-        .write = sched_autogroup_write,
-        .llseek = seq_lseek,
-        .release = single_release,
-};
-
-#endif /* CONFIG_SCHED_AUTOGROUP */
-
 static ssize_t comm_write(struct file *file, const char __user *buf,
                           size_t count, loff_t *offset)
 {
@@ -2657,9 +2582,6 @@ static const struct pid_entry tgid_base_stuff[] = {
         INF("limits", S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
         REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
-#ifdef CONFIG_SCHED_AUTOGROUP
-        REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
-#endif
         REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
@@ -433,14 +433,29 @@ struct cpu_itimer {
         u32 incr_error;
 };
 
+/**
+ * struct cputime - snapshot of system and user cputime
+ * @utime: time spent in user mode
+ * @stime: time spent in system mode
+ *
+ * Gathers a generic snapshot of user and system time.
+ */
+struct cputime {
+        cputime_t utime;
+        cputime_t stime;
+};
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime: time spent in user mode, in &cputime_t units
  * @stime: time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
  *
- * This structure groups together three kinds of CPU time that are
- * tracked for threads and thread groups. Most things considering
+ * This is an extension of struct cputime that includes the total runtime
+ * spent by the task from the scheduler point of view.
+ *
+ * As a result, this structure groups together three kinds of CPU time
+ * that are tracked for threads and thread groups. Most things considering
  * CPU time want to group these counts together and treat all three
  * of them in parallel.
  */
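To make the relationship in the new kernel-doc concrete: struct cputime is the bare user/system pair, and struct task_cputime is that same pair plus the scheduler's nanosecond runtime. A small stand-alone sketch (cputime_t is a stand-in typedef; the field set is taken from the documentation above):

typedef unsigned long long cputime_t;   /* stand-in for the kernel type */

struct cputime {                        /* raw user/system snapshot */
        cputime_t utime;
        cputime_t stime;
};

struct task_cputime {                   /* same pair plus precise runtime */
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;    /* nanoseconds */
};

The hunks that follow swap the per-task and per-signal prev_utime/prev_stime pair for a single struct cputime prev_cputime, which is what the consolidated adjustment code caches between reads.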
@@ -581,7 +596,7 @@ struct signal_struct {
         cputime_t gtime;
         cputime_t cgtime;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-        cputime_t prev_utime, prev_stime;
+        struct cputime prev_cputime;
 #endif
         unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
         unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1340,7 +1355,7 @@ struct task_struct {
         cputime_t utime, stime, utimescaled, stimescaled;
         cputime_t gtime;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-        cputime_t prev_utime, prev_stime;
+        struct cputime prev_cputime;
 #endif
         unsigned long nvcsw, nivcsw; /* context switch counts */
         struct timespec start_time; /* monotonic time */
@@ -1751,8 +1766,8 @@ static inline void put_task_struct(struct task_struct *t)
                 __put_task_struct(t);
 }
 
-extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
 /*
  * Per process flags
@@ -1186,11 +1186,11 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
          * as other threads in the parent group can be right
          * here reaping other children at the same time.
          *
-         * We use thread_group_times() to get times for the thread
+         * We use thread_group_cputime_adjusted() to get times for the thread
          * group, which consolidates times for all threads in the
          * group including the group leader.
          */
-        thread_group_times(p, &tgutime, &tgstime);
+        thread_group_cputime_adjusted(p, &tgutime, &tgstime);
         spin_lock_irq(&p->real_parent->sighand->siglock);
         psig = p->real_parent->signal;
         sig = p->signal;
@@ -1222,7 +1222,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         p->utime = p->stime = p->gtime = 0;
         p->utimescaled = p->stimescaled = 0;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-        p->prev_utime = p->prev_stime = 0;
+        p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
 #if defined(SPLIT_RSS_COUNTING)
         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
@@ -217,30 +217,6 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
         return 0;
 }
 
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
-        struct signal_struct *sig = tsk->signal;
-        struct task_struct *t;
-
-        times->utime = sig->utime;
-        times->stime = sig->stime;
-        times->sum_exec_runtime = sig->sum_sched_runtime;
-
-        rcu_read_lock();
-        /* make sure we can trust tsk->thread_group list */
-        if (!likely(pid_alive(tsk)))
-                goto out;
-
-        t = tsk;
-        do {
-                times->utime += t->utime;
-                times->stime += t->stime;
-                times->sum_exec_runtime += task_sched_runtime(t);
-        } while_each_thread(tsk, t);
-out:
-        rcu_read_unlock();
-}
-
 static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
 {
         if (b->utime > a->utime)
@@ -110,6 +110,9 @@ static inline struct autogroup *autogroup_create(void)
 
 bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return false;
+
         if (tg != &root_task_group)
                 return false;
 
@@ -155,8 +158,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 /* Allocates GFP_KERNEL, cannot be called under any spinlock */
 void sched_autogroup_create_attach(struct task_struct *p)
 {
-        struct autogroup *ag = autogroup_create();
+        struct autogroup *ag;
 
+        if (!sysctl_sched_autogroup_enabled)
+                return;
+
+        ag = autogroup_create();
         autogroup_move_group(p, ag);
         /* drop extra reference added by autogroup_create() */
         autogroup_kref_put(ag);
@@ -172,11 +178,15 @@ EXPORT_SYMBOL(sched_autogroup_detach);
 
 void sched_autogroup_fork(struct signal_struct *sig)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return;
+
         sig->autogroup = autogroup_task_get(current);
 }
 
 void sched_autogroup_exit(struct signal_struct *sig)
 {
+        if (!sysctl_sched_autogroup_enabled)
+                return;
+
         autogroup_kref_put(sig->autogroup);
 }
 
@@ -189,58 +199,6 @@ static int __init setup_autogroup(char *str)
 
 __setup("noautogroup", setup_autogroup);
 
-#ifdef CONFIG_PROC_FS
-
-int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
-{
-        static unsigned long next = INITIAL_JIFFIES;
-        struct autogroup *ag;
-        int err;
-
-        if (nice < -20 || nice > 19)
-                return -EINVAL;
-
-        err = security_task_setnice(current, nice);
-        if (err)
-                return err;
-
-        if (nice < 0 && !can_nice(current, nice))
-                return -EPERM;
-
-        /* this is a heavy operation taking global locks.. */
-        if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
-                return -EAGAIN;
-
-        next = HZ / 10 + jiffies;
-        ag = autogroup_task_get(p);
-
-        down_write(&ag->lock);
-        err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
-        if (!err)
-                ag->nice = nice;
-        up_write(&ag->lock);
-
-        autogroup_kref_put(ag);
-
-        return err;
-}
-
-void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
-{
-        struct autogroup *ag = autogroup_task_get(p);
-
-        if (!task_group_is_autogroup(ag->tg))
-                goto out;
-
-        down_read(&ag->lock);
-        seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
-        up_read(&ag->lock);
-
-out:
-        autogroup_kref_put(ag);
-}
-#endif /* CONFIG_PROC_FS */
-
 #ifdef CONFIG_SCHED_DEBUG
 int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
@@ -24,9 +24,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
-        if (enabled && task_wants_autogroup(p, tg))
+        if (task_wants_autogroup(p, tg))
                 return p->signal->autogroup->tg;
 
         return tg;
@@ -288,6 +288,34 @@ static __always_inline bool steal_account_process_tick(void)
         return false;
 }
 
+/*
+ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
+ * tasks (sum on group iteration) belonging to @tsk's group.
+ */
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+        struct signal_struct *sig = tsk->signal;
+        struct task_struct *t;
+
+        times->utime = sig->utime;
+        times->stime = sig->stime;
+        times->sum_exec_runtime = sig->sum_sched_runtime;
+
+        rcu_read_lock();
+        /* make sure we can trust tsk->thread_group list */
+        if (!likely(pid_alive(tsk)))
+                goto out;
+
+        t = tsk;
+        do {
+                times->utime += t->utime;
+                times->stime += t->stime;
+                times->sum_exec_runtime += task_sched_runtime(t);
+        } while_each_thread(tsk, t);
+out:
+        rcu_read_unlock();
+}
+
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -417,13 +445,13 @@ void account_idle_ticks(unsigned long ticks)
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         *ut = p->utime;
         *st = p->stime;
 }
 
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         struct task_cputime cputime;
 
@@ -488,14 +516,30 @@ static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
         return (__force cputime_t) temp;
 }
 
-void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+/*
+ * Adjust tick based cputime random precision against scheduler
+ * runtime accounting.
+ */
+static void cputime_adjust(struct task_cputime *curr,
+                           struct cputime *prev,
+                           cputime_t *ut, cputime_t *st)
 {
-        cputime_t rtime, utime = p->utime, total = utime + p->stime;
+        cputime_t rtime, utime, total;
+
+        utime = curr->utime;
+        total = utime + curr->stime;
 
         /*
-         * Use CFS's precise accounting:
+         * Tick based cputime accounting depends on random scheduling
+         * timeslices of a task to be interrupted or not by the timer.
+         * Depending on these circumstances, the number of these interrupts
+         * may be over or under-optimistic, matching the real user and system
+         * cputime with a variable precision.
+         *
+         * Fix this by scaling these tick based values against the total
+         * runtime accounted by the CFS scheduler.
         */
-        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+        rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
         if (total)
                 utime = scale_utime(utime, rtime, total);
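The scaling described by the new comment is plain proportional arithmetic: the tick-sampled utime/stime only fix the user/system split, while the scheduler's sum_exec_runtime fixes the total. A stand-alone userspace illustration with made-up numbers (uint64_t stands in for cputime_t; the kernel's scale_utime(), visible as context above, performs the same multiply-then-divide with a 64-bit intermediate):

#include <stdio.h>
#include <stdint.h>

/* Rescale the tick-based user time so that user + system adds up to the
 * precise scheduler runtime while preserving the sampled user/system ratio.
 */
static uint64_t scale_utime(uint64_t utime, uint64_t rtime, uint64_t total)
{
        return (utime * rtime) / total;
}

int main(void)
{
        uint64_t utime = 600, stime = 500;      /* tick samples (imprecise)    */
        uint64_t rtime = 1000;                  /* scheduler runtime (trusted) */
        uint64_t total = utime + stime;
        uint64_t ut = total ? scale_utime(utime, rtime, total) : rtime;

        /* 600/1100 of 1000 -> 545; system time becomes the remainder */
        printf("utime=%llu stime=%llu\n",
               (unsigned long long)ut, (unsigned long long)(rtime - ut));
        return 0;
}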
@@ -503,38 +547,36 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
                 utime = rtime;
 
         /*
-         * Compare with previous values, to keep monotonicity:
+         * If the tick based count grows faster than the scheduler one,
+         * the result of the scaling may go backward.
+         * Let's enforce monotonicity.
         */
-        p->prev_utime = max(p->prev_utime, utime);
-        p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+        prev->utime = max(prev->utime, utime);
+        prev->stime = max(prev->stime, rtime - prev->utime);
 
-        *ut = p->prev_utime;
-        *st = p->prev_stime;
+        *ut = prev->utime;
+        *st = prev->stime;
+}
+
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+        struct task_cputime cputime = {
+                .utime = p->utime,
+                .stime = p->stime,
+                .sum_exec_runtime = p->se.sum_exec_runtime,
+        };
+
+        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 
 /*
  * Must be called with siglock held.
  */
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-        struct signal_struct *sig = p->signal;
         struct task_cputime cputime;
-        cputime_t rtime, utime, total;
 
         thread_group_cputime(p, &cputime);
-
-        total = cputime.utime + cputime.stime;
-        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
-
-        if (total)
-                utime = scale_utime(cputime.utime, rtime, total);
-        else
-                utime = rtime;
-
-        sig->prev_utime = max(sig->prev_utime, utime);
-        sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
-
-        *ut = sig->prev_utime;
-        *st = sig->prev_stime;
+        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
 #endif
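Why the max() clamp matters: between two successive reads, the tick-based split can shift enough that a freshly scaled utime comes out lower than the one already reported, so the cached prev values act as a floor. A stand-alone illustration in userspace C with made-up numbers and simplified integer math (the kernel works on cputime_t and the scale_utime() helper shown earlier):

#include <stdio.h>
#include <stdint.h>

struct prev_cputime { uint64_t utime, stime; };     /* stand-in for struct cputime */

/* Scale the tick-based split against rtime, then clamp against the values
 * reported last time so the numbers seen by userspace never go backward.
 */
static void adjust(struct prev_cputime *prev, uint64_t utime, uint64_t stime,
                   uint64_t rtime, uint64_t *ut, uint64_t *st)
{
        uint64_t total = utime + stime;
        uint64_t u = total ? (utime * rtime) / total : rtime;

        if (u > prev->utime)
                prev->utime = u;
        if (rtime - prev->utime > prev->stime)
                prev->stime = rtime - prev->utime;
        *ut = prev->utime;
        *st = prev->stime;
}

int main(void)
{
        struct prev_cputime prev = { 0, 0 };
        uint64_t ut, st;

        adjust(&prev, 60, 40, 100, &ut, &st);
        printf("first read:  utime=%llu stime=%llu\n",
               (unsigned long long)ut, (unsigned long long)st);

        /* raw scaling would now give utime = 61*101/111 = 55, i.e. backward */
        adjust(&prev, 61, 50, 101, &ut, &st);
        printf("second read: utime=%llu stime=%llu\n",
               (unsigned long long)ut, (unsigned long long)st);
        return 0;
}

The second read prints utime=60 rather than 55: the clamp keeps user time monotonic and folds the difference into system time.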
@@ -3330,7 +3330,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
          * Batch and idle tasks do not preempt non-idle tasks (their preemption
          * is driven by the tick):
          */
-        if (unlikely(p->policy != SCHED_NORMAL))
+        if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
                 return;
 
         find_matching_se(&se, &pse);
@@ -31,6 +31,11 @@ SCHED_FEAT(LAST_BUDDY, true)
  */
 SCHED_FEAT(CACHE_HOT_BUDDY, true)
 
+/*
+ * Allow wakeup-time preemption of the current task:
+ */
+SCHED_FEAT(WAKEUP_PREEMPTION, true)
+
 /*
  * Use arch dependent cpu power functions
  */
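These two hunks read together: check_preempt_wakeup() now bails out when the new WAKEUP_PREEMPTION scheduler feature is cleared, and the features list declares that feature, default-on. Roughly how a SCHED_FEAT() entry becomes a testable flag, shown as a simplified userspace analog (the kernel builds its enum and default mask by re-including the features list with different macro definitions and adds a jump-label fast path, so treat the details below as an approximation):

#include <stdio.h>

#define SCHED_FEATURES(F)               \
        F(CACHE_HOT_BUDDY, 1)           \
        F(WAKEUP_PREEMPTION, 1)

/* First expansion: give every feature an enum bit index. */
#define F_ENUM(name, enabled)   __FEAT_##name,
enum { SCHED_FEATURES(F_ENUM) __FEAT_NR };

/* Second expansion: build the default-enabled bitmask. */
#define F_DEFAULT(name, enabled)        ((enabled) ? 1u << __FEAT_##name : 0u) |
static unsigned int sched_features = SCHED_FEATURES(F_DEFAULT) 0u;

#define sched_feat(name)        (sched_features & (1u << __FEAT_##name))

int main(void)
{
        printf("WAKEUP_PREEMPTION is %s\n",
               sched_feat(WAKEUP_PREEMPTION) ? "enabled" : "disabled");
        return 0;
}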
@@ -1046,7 +1046,7 @@ void do_sys_times(struct tms *tms)
         cputime_t tgutime, tgstime, cutime, cstime;
 
         spin_lock_irq(&current->sighand->siglock);
-        thread_group_times(current, &tgutime, &tgstime);
+        thread_group_cputime_adjusted(current, &tgutime, &tgstime);
         cutime = current->signal->cutime;
         cstime = current->signal->cstime;
         spin_unlock_irq(&current->sighand->siglock);
@@ -1704,7 +1704,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
         utime = stime = 0;
 
         if (who == RUSAGE_THREAD) {
-                task_times(current, &utime, &stime);
+                task_cputime_adjusted(current, &utime, &stime);
                 accumulate_thread_rusage(p, r);
                 maxrss = p->signal->maxrss;
                 goto out;
@@ -1730,7 +1730,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                 break;
 
         case RUSAGE_SELF:
-                thread_group_times(p, &tgutime, &tgstime);
+                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                 utime += tgutime;
                 stime += tgstime;
                 r->ru_nvcsw += p->signal->nvcsw;
@@ -367,10 +367,8 @@ static struct ctl_table kern_table[] = {
                 .procname = "sched_autogroup_enabled",
                 .data = &sysctl_sched_autogroup_enabled,
                 .maxlen = sizeof(unsigned int),
-                .mode = 0644,
-                .proc_handler = proc_dointvec_minmax,
-                .extra1 = &zero,
-                .extra2 = &one,
+                .mode = 0444,
+                .proc_handler = proc_dointvec,
         },
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
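User-visible effect of that last hunk: the sched_autogroup_enabled sysctl is still exported, but with mode 0444 and a plain proc_dointvec handler it becomes a read-only report of the boot-time choice (the "noautogroup" parameter seen earlier in this diff), and an unprivileged open for writing should now fail. A small sketch, assuming the usual /proc/sys mapping of kernel.* entries:

#include <stdio.h>

int main(void)
{
        char buf[16] = "";
        FILE *f = fopen("/proc/sys/kernel/sched_autogroup_enabled", "r");

        if (!f) {
                perror("sched_autogroup_enabled");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("autogroup enabled: %s", buf);   /* "0\n" or "1\n" */
        fclose(f);
        return 0;
}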