Merge branch 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup updates from Tejun Heo:

 - Oleg's pids controller accounting update which gets rid of the RCU
   delay in pids accounting updates (see the sketch after this list)

 - rstat (cgroup hierarchical stat collection mechanism) optimization

 - Doc updates
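
The pids change is easiest to see in the free() -> release() rename in the
diffs below: the pids uncharge used to happen from the ->free() callback,
which is not invoked until the task_struct itself is freed (at the earliest
an RCU grace period after release_task()), and now happens from the new
->release() callback, which release_task() invokes as soon as the task is
reaped. A minimal userspace sketch of how the effect can be observed (not
kernel code; it assumes cgroup2 mounted at /sys/fs/cgroup and that the
calling process already sits in a pids-enabled cgroup at the illustrative
path below):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Read a single integer counter such as pids.current (example helper). */
static long read_counter(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Illustrative path; point it at wherever the pids-enabled cgroup lives. */
	const char *counter = "/sys/fs/cgroup/demo/pids.current";
	pid_t child = fork();

	if (child < 0) {
		perror("fork");
		return 1;
	}
	if (child == 0)
		_exit(0);		/* child exits immediately */

	waitpid(child, NULL, 0);	/* reap the child */

	/*
	 * With ->release()-time uncharging the child's pid charge is gone by
	 * the time waitpid() returns; with the old ->free()-time uncharging
	 * it could linger until after an RCU grace period.
	 */
	printf("pids.current right after reap: %ld\n", read_counter(counter));
	return 0;
}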

* 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cpuset: remove unused task_has_mempolicy()
  cgroup, rstat: Don't flush subtree root unless necessary
  cgroup: add documentation for pids.events file
  Documentation: cgroup-v2: eliminate markup warnings
  MAINTAINERS: Update cgroup entry
  cgroup/pids: turn cgroup_subsys->free() into cgroup_subsys->release() to fix the accounting
Linus Torvalds 2019-03-07 10:11:41 -08:00
commit 1fc1cd8399
10 changed files with 28 additions and 29 deletions

@@ -1519,7 +1519,7 @@ protected workload.
 The limits are only applied at the peer level in the hierarchy. This means that
 in the diagram below, only groups A, B, and C will influence each other, and
-groups D and F will influence each other. Group G will influence nobody.
+groups D and F will influence each other. Group G will influence nobody::
 
   [root]
   /  |  \

@@ -33,6 +33,9 @@ limit in the hierarchy is followed).
 pids.current tracks all child cgroup hierarchies, so parent/pids.current is a
 superset of parent/child/pids.current.
 
+The pids.events file contains event counters:
+  - max: Number of times fork failed because limit was hit.
+
 Example
 -------
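
The hunk above documents the single event currently reported in pids.events;
the file itself is a per-line "<event> <count>" listing, so the counter can
be read with a few lines of C (a sketch only; the cgroup path below is an
example, not part of this change):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Example path for a pids-enabled cgroup; adjust as needed. */
	const char *path = "/sys/fs/cgroup/pids/demo/pids.events";
	char key[64];
	unsigned long long count;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Each line is "<event> <count>"; "max" is the only documented event. */
	while (fscanf(f, "%63s %llu", key, &count) == 2) {
		if (strcmp(key, "max") == 0)
			printf("forks failed because pids.max was hit: %llu\n", count);
	}
	fclose(f);
	return 0;
}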

@@ -3970,9 +3970,10 @@ M: Johannes Weiner <hannes@cmpxchg.org>
 L: cgroups@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S: Maintained
-F: Documentation/cgroup*
+F: Documentation/admin-guide/cgroup-v2.rst
+F: Documentation/cgroup-v1/
 F: include/linux/cgroup*
-F: kernel/cgroup*
+F: kernel/cgroup/
 
 CONTROL GROUP - CPUSET
 M: Li Zefan <lizefan@huawei.com>

@@ -606,7 +606,7 @@ struct cgroup_subsys {
 	void (*cancel_fork)(struct task_struct *task);
 	void (*fork)(struct task_struct *task);
 	void (*exit)(struct task_struct *task);
-	void (*free)(struct task_struct *task);
+	void (*release)(struct task_struct *task);
 	void (*bind)(struct cgroup_subsys_state *root_css);
 
 	bool early_init:1;

@@ -121,6 +121,7 @@ extern int cgroup_can_fork(struct task_struct *p);
 extern void cgroup_cancel_fork(struct task_struct *p);
 extern void cgroup_post_fork(struct task_struct *p);
 void cgroup_exit(struct task_struct *p);
+void cgroup_release(struct task_struct *p);
 void cgroup_free(struct task_struct *p);
 
 int cgroup_init_early(void);
@@ -697,6 +698,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
 static inline void cgroup_cancel_fork(struct task_struct *p) {}
 static inline void cgroup_post_fork(struct task_struct *p) {}
 static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_release(struct task_struct *p) {}
 static inline void cgroup_free(struct task_struct *p) {}
 static inline int cgroup_init_early(void) { return 0; }

@@ -197,7 +197,7 @@ static u64 css_serial_nr_next = 1;
  */
 static u16 have_fork_callback __read_mostly;
 static u16 have_exit_callback __read_mostly;
-static u16 have_free_callback __read_mostly;
+static u16 have_release_callback __read_mostly;
 static u16 have_canfork_callback __read_mostly;
 
 /* cgroup namespace for init task */
@@ -5326,7 +5326,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 	have_fork_callback |= (bool)ss->fork << ss->id;
 	have_exit_callback |= (bool)ss->exit << ss->id;
-	have_free_callback |= (bool)ss->free << ss->id;
+	have_release_callback |= (bool)ss->release << ss->id;
 	have_canfork_callback |= (bool)ss->can_fork << ss->id;
 
 	/* At system boot, before all subsystems have been
@@ -5762,16 +5762,19 @@ void cgroup_exit(struct task_struct *tsk)
 	} while_each_subsys_mask();
 }
 
-void cgroup_free(struct task_struct *task)
+void cgroup_release(struct task_struct *task)
 {
-	struct css_set *cset = task_css_set(task);
 	struct cgroup_subsys *ss;
 	int ssid;
 
-	do_each_subsys_mask(ss, ssid, have_free_callback) {
-		ss->free(task);
+	do_each_subsys_mask(ss, ssid, have_release_callback) {
+		ss->release(task);
 	} while_each_subsys_mask();
+}
 
+void cgroup_free(struct task_struct *task)
+{
+	struct css_set *cset = task_css_set(task);
 	put_css_set(cset);
 }

@@ -203,19 +203,6 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
 	return css_cs(cs->css.parent);
 }
 
-#ifdef CONFIG_NUMA
-static inline bool task_has_mempolicy(struct task_struct *task)
-{
-	return task->mempolicy;
-}
-#else
-static inline bool task_has_mempolicy(struct task_struct *task)
-{
-	return false;
-}
-#endif
-
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_ONLINE,

@@ -247,7 +247,7 @@ static void pids_cancel_fork(struct task_struct *task)
 	pids_uncharge(pids, 1);
 }
 
-static void pids_free(struct task_struct *task)
+static void pids_release(struct task_struct *task)
 {
 	struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
 
@@ -342,7 +342,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
 	.cancel_attach = pids_cancel_attach,
 	.can_fork = pids_can_fork,
 	.cancel_fork = pids_cancel_fork,
-	.free = pids_free,
+	.release = pids_release,
 	.legacy_cftypes = pids_files,
 	.dfl_cftypes = pids_files,
 	.threaded = true,

@@ -87,7 +87,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 						   struct cgroup *root, int cpu)
 {
 	struct cgroup_rstat_cpu *rstatc;
-	struct cgroup *parent;
 
 	if (pos == root)
 		return NULL;
@@ -115,8 +114,8 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 	 * However, due to the way we traverse, @pos will be the first
 	 * child in most cases. The only exception is @root.
 	 */
-	parent = cgroup_parent(pos);
-	if (parent && rstatc->updated_next) {
+	if (rstatc->updated_next) {
+		struct cgroup *parent = cgroup_parent(pos);
 		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
 		struct cgroup_rstat_cpu *nrstatc;
 		struct cgroup **nextp;
@@ -140,9 +139,12 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 		 * updated stat.
 		 */
 		smp_mb();
+
+		return pos;
 	}
 
-	return pos;
+	/* only happens for @root */
+	return NULL;
 }
 
 /* see cgroup_rstat_flush() */

@@ -219,6 +219,7 @@ void release_task(struct task_struct *p)
 	}
 
 	write_unlock_irq(&tasklist_lock);
+	cgroup_release(p);
 	release_thread(p);
 	call_rcu(&p->rcu, delayed_put_task_struct);