From fa3ca07e96185aa1496b405472399a2a2a336a17 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 14 Apr 2013 11:36:56 -0700 Subject: cgroup: refactor hierarchy_id handling We're planning to convert hierarchy_ida to an idr and use it to look up a hierarchy from its id. As we want the mapping to happen atomically with cgroupfs_root registration, this patch refactors hierarchy_id init / exit so that ida operations happen inside cgroup_[root_]mutex. * s/init_root_id()/cgroup_init_root_id()/ and make it return 0 or -errno like a normal function. * Move hierarchy_id initialization from cgroup_root_from_opts() into cgroup_mount() block where the root is confirmed to be used and being registered while holding both mutexes. * Split cgroup_drop_id() into cgroup_exit_root_id() and cgroup_free_root(), so that ID release can happen before dropping the mutexes in cgroup_kill_sb(). The latter expects hierarchy_id to be exited before being invoked. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2a99262..dbc84f7 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1426,13 +1426,13 @@ static void init_cgroup_root(struct cgroupfs_root *root) list_add_tail(&cgrp->allcg_node, &root->allcg_list); } -static bool init_root_id(struct cgroupfs_root *root) +static int cgroup_init_root_id(struct cgroupfs_root *root) { - int ret = 0; + int ret; do { if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL)) - return false; + return -ENOMEM; spin_lock(&hierarchy_id_lock); /* Try to allocate the next unused ID */ ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id, @@ -1448,7 +1448,18 @@ static bool init_root_id(struct cgroupfs_root *root) } spin_unlock(&hierarchy_id_lock); } while (ret); - return true; + return 0; +} + +static void cgroup_exit_root_id(struct cgroupfs_root *root) +{ + if (root->hierarchy_id) { + spin_lock(&hierarchy_id_lock); + ida_remove(&hierarchy_ida, root->hierarchy_id); + spin_unlock(&hierarchy_id_lock); + + root->hierarchy_id = 0; + } } static int cgroup_test_super(struct super_block *sb, void *data) @@ -1482,10 +1493,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) if (!root) return ERR_PTR(-ENOMEM); - if (!init_root_id(root)) { - kfree(root); - return ERR_PTR(-ENOMEM); - } init_cgroup_root(root); root->subsys_mask = opts->subsys_mask; @@ -1500,17 +1507,15 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) return root; } -static void cgroup_drop_root(struct cgroupfs_root *root) +static void cgroup_free_root(struct cgroupfs_root *root) { - if (!root) - return; + if (root) { + /* hierarchy ID should already have been released */ + WARN_ON_ONCE(root->hierarchy_id); - BUG_ON(!root->hierarchy_id); - spin_lock(&hierarchy_id_lock); - ida_remove(&hierarchy_ida, root->hierarchy_id); - spin_unlock(&hierarchy_id_lock); - ida_destroy(&root->cgroup_ida); - kfree(root); + ida_destroy(&root->cgroup_ida); + kfree(root); + } } static int cgroup_set_super(struct super_block *sb, void *data) @@ -1597,7 +1602,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts); if (IS_ERR(sb)) { ret = PTR_ERR(sb); - cgroup_drop_root(opts.new_root); + cgroup_free_root(opts.new_root); goto drop_modules; } @@ -1641,6 +1646,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (ret) goto unlock_drop; + ret = cgroup_init_root_id(root); + if (ret) + goto unlock_drop; + ret = rebind_subsystems(root,
root->subsys_mask); if (ret == -EBUSY) { free_cg_links(&tmp_cg_links); @@ -1684,7 +1693,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, * We re-used an existing hierarchy - the new root (if * any) is not needed */ - cgroup_drop_root(opts.new_root); + cgroup_free_root(opts.new_root); if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) && root->flags != opts.flags) { @@ -1702,6 +1711,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, return dget(sb->s_root); unlock_drop: + cgroup_exit_root_id(root); mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); mutex_unlock(&inode->i_mutex); @@ -1754,13 +1764,15 @@ static void cgroup_kill_sb(struct super_block *sb) { root_count--; } + cgroup_exit_root_id(root); + mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); simple_xattrs_free(&cgrp->xattrs); kill_litter_super(sb); - cgroup_drop_root(root); + cgroup_free_root(root); } static struct file_system_type cgroup_fs_type = { @@ -4642,7 +4654,9 @@ int __init cgroup_init(void) /* Add init_css_set to the hash table */ key = css_set_hash(init_css_set.subsys); hash_add(css_set_table, &init_css_set.hlist, key); - BUG_ON(!init_root_id(&rootnode)); + + /* allocate id for the dummy hierarchy */ + BUG_ON(cgroup_init_root_id(&rootnode)); cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); if (!cgroup_kobj) { -- cgit v0.10.2 From 54e7b4eb15fc4354d5ada5469e3db4a220ddb3ed Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 14 Apr 2013 11:36:57 -0700 Subject: cgroup: drop hierarchy_id_lock Now that hierarchy_id alloc / free are protected by the cgroup mutexes, there's no need for this separate lock. Drop it. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index dbc84f7..3ef677d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -189,9 +189,13 @@ struct cgroup_event { static LIST_HEAD(roots); static int root_count; +/* + * Hierarchy ID allocation and mapping. It follows the same exclusion + * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for + * writes, either for reads. + */ static DEFINE_IDA(hierarchy_ida); static int next_hierarchy_id; -static DEFINE_SPINLOCK(hierarchy_id_lock); /* dummytop is a shorthand for the dummy hierarchy's top cgroup */ #define dummytop (&rootnode.top_cgroup) @@ -1430,10 +1434,12 @@ static int cgroup_init_root_id(struct cgroupfs_root *root) { int ret; + lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&cgroup_root_mutex); + do { if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL)) return -ENOMEM; - spin_lock(&hierarchy_id_lock); /* Try to allocate the next unused ID */ ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id, &root->hierarchy_id); @@ -1446,18 +1452,17 @@ static int cgroup_init_root_id(struct cgroupfs_root *root) /* Can only get here if the 31-bit IDR is full ... 
*/ BUG_ON(ret); } - spin_unlock(&hierarchy_id_lock); } while (ret); return 0; } static void cgroup_exit_root_id(struct cgroupfs_root *root) { + lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&cgroup_root_mutex); + if (root->hierarchy_id) { - spin_lock(&hierarchy_id_lock); ida_remove(&hierarchy_ida, root->hierarchy_id); - spin_unlock(&hierarchy_id_lock); - root->hierarchy_id = 0; } } @@ -4656,8 +4661,14 @@ int __init cgroup_init(void) hash_add(css_set_table, &init_css_set.hlist, key); /* allocate id for the dummy hierarchy */ + mutex_lock(&cgroup_mutex); + mutex_lock(&cgroup_root_mutex); + BUG_ON(cgroup_init_root_id(&rootnode)); + mutex_unlock(&cgroup_root_mutex); + mutex_unlock(&cgroup_mutex); + cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); if (!cgroup_kobj) { err = -ENOMEM; -- cgit v0.10.2 From 1a574231669f8c3065c83974e9557fcbbd94b8a6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 14 Apr 2013 11:36:58 -0700 Subject: cgroup: make hierarchy_id use cyclic idr We want to be able to look up a hierarchy from its id and cyclic allocation is a whole lot simpler with idr. Convert to idr and use idr_alloc_cyclic(). Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3ef677d..dcb417c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -194,8 +194,7 @@ static int root_count; * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for * writes, either for reads. */ -static DEFINE_IDA(hierarchy_ida); -static int next_hierarchy_id; +static DEFINE_IDR(cgroup_hierarchy_idr); /* dummytop is a shorthand for the dummy hierarchy's top cgroup */ #define dummytop (&rootnode.top_cgroup) @@ -1432,27 +1431,16 @@ static void init_cgroup_root(struct cgroupfs_root *root) static int cgroup_init_root_id(struct cgroupfs_root *root) { - int ret; + int id; lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&cgroup_root_mutex); - do { - if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL)) - return -ENOMEM; - /* Try to allocate the next unused ID */ - ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id, - &root->hierarchy_id); - if (ret == -ENOSPC) - /* Try again starting from 0 */ - ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id); - if (!ret) { - next_hierarchy_id = root->hierarchy_id + 1; - } else if (ret != -EAGAIN) { - /* Can only get here if the 31-bit IDR is full ... */ - BUG_ON(ret); - } - } while (ret); + id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 2, 0, GFP_KERNEL); + if (id < 0) + return id; + + root->hierarchy_id = id; return 0; } @@ -1462,7 +1450,7 @@ static void cgroup_exit_root_id(struct cgroupfs_root *root) lockdep_assert_held(&cgroup_root_mutex); if (root->hierarchy_id) { - ida_remove(&hierarchy_ida, root->hierarchy_id); + idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); root->hierarchy_id = 0; } } -- cgit v0.10.2 From 857a2beb09ab83e9a8185821ae16db7dfbe8b837 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 14 Apr 2013 20:50:08 -0700 Subject: cgroup: implement task_cgroup_path_from_hierarchy() kdbus folks want a sane way to determine the cgroup path that a given task belongs to on a given hierarchy, which is a reasonable thing to expect from cgroup core. Implement task_cgroup_path_from_hierarchy(). v2: Dropped unnecessary NULL check on the return value of task_cgroup_from_root() as suggested by Li Zefan.
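For reference, a minimal usage sketch of the new interface (hypothetical caller, not part of this patch; the hierarchy id, buffer handling and messages are illustrative only):

	char buf[PATH_MAX];
	int ret;

	/* resolve @task's cgroup path on the hierarchy with id 1, if any */
	ret = task_cgroup_path_from_hierarchy(task, 1, buf, sizeof(buf));
	if (!ret)
		pr_info("task is in cgroup %s\n", buf);
	else if (ret == -ENOENT)
		pr_info("no hierarchy with id 1\n");

Note that the function grabs cgroup_mutex internally, so it may sleep and must not be called from atomic context.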
Signed-off-by: Tejun Heo Acked-by: Greg Kroah-Hartman Acked-by: Li Zefan Cc: Kay Sievers Cc: Lennart Poettering Cc: Daniel Mack diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 5047355..383c630 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -542,6 +542,8 @@ int cgroup_is_removed(const struct cgroup *cgrp); bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); +int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id, + char *buf, size_t buflen); int cgroup_task_count(const struct cgroup *cgrp); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index dcb417c..6b2b1d9 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1827,6 +1827,38 @@ out: } EXPORT_SYMBOL_GPL(cgroup_path); +/** + * task_cgroup_path_from_hierarchy - cgroup path of a task on a hierarchy + * @task: target task + * @hierarchy_id: the hierarchy to look up @task's cgroup from + * @buf: the buffer to write the path into + * @buflen: the length of the buffer + * + * Determine @task's cgroup on the hierarchy specified by @hierarchy_id and + * copy its path into @buf. This function grabs cgroup_mutex and shouldn't + * be used inside locks used by cgroup controller callbacks. + */ +int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id, + char *buf, size_t buflen) +{ + struct cgroupfs_root *root; + struct cgroup *cgrp = NULL; + int ret = -ENOENT; + + mutex_lock(&cgroup_mutex); + + root = idr_find(&cgroup_hierarchy_idr, hierarchy_id); + if (root) { + cgrp = task_cgroup_from_root(task, root); + ret = cgroup_path(cgrp, buf, buflen); + } + + mutex_unlock(&cgroup_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(task_cgroup_path_from_hierarchy); + /* * Control Group taskset */ -- cgit v0.10.2 From 23958e729e7029678e746bf8f4094c8863a79c3d Mon Sep 17 00:00:00 2001 From: Greg KH Date: Fri, 3 May 2013 16:26:59 -0700 Subject: cgroup.h: remove some functions that are now gone cgroup_lock() and cgroup_unlock() are now no longer exported, so fix cgroup.h to not declare them if CONFIG_CGROUPS is not enabled. Signed-off-by: Greg Kroah-Hartman Acked-by: Li Zefan Signed-off-by: Tejun Heo diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 383c630..4f6f513 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -840,8 +840,6 @@ static inline void cgroup_fork(struct task_struct *p) {} static inline void cgroup_post_fork(struct task_struct *p) {} static inline void cgroup_exit(struct task_struct *p, int callbacks) {} -static inline void cgroup_lock(void) {} -static inline void cgroup_unlock(void) {} static inline int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) { -- cgit v0.10.2 From bdc7119f1bdd0632d42f435941dc290216a436e7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 24 May 2013 10:55:38 +0900 Subject: cgroup: make cgroup_is_removed() static cgroup_is_removed() no longer has external users and it shouldn't grow any - controllers should deal with cgroup_subsys_state on/offline state instead of cgroup removal state. Make it static. While at it, make it return bool. 
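For illustration, the pattern controllers are expected to use instead (a hypothetical sketch; foo_cgroup and cgroup_to_foo() are made-up names, not part of this patch):

	/* track on/offline state from the css callbacks rather than
	 * testing cgroup removal state */
	static int foo_css_online(struct cgroup *cgrp)
	{
		struct foo_cgroup *foo = cgroup_to_foo(cgrp);

		foo->online = true;
		return 0;
	}

	static void foo_css_offline(struct cgroup *cgrp)
	{
		struct foo_cgroup *foo = cgroup_to_foo(cgrp);

		foo->online = false;
	}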
Signed-off-by: Tejun Heo diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1df5f69..8d9f3c9 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -538,7 +538,6 @@ static inline const char *cgroup_name(const struct cgroup *cgrp) int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); -int cgroup_is_removed(const struct cgroup *cgrp); bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index a19419f..5019748 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -226,7 +226,7 @@ static int css_refcnt(struct cgroup_subsys_state *css) } /* convenient tests for these bits */ -inline int cgroup_is_removed(const struct cgroup *cgrp) +static inline bool cgroup_is_removed(const struct cgroup *cgrp) { return test_bit(CGRP_REMOVED, &cgrp->flags); } -- cgit v0.10.2 From 53fa5261747a90746531e8a1c81eeb78fedc2f71 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 24 May 2013 10:55:38 +0900 Subject: cgroup: add cgroup->serial_nr and implement cgroup_next_sibling() Currently, there's no easy way to find out the next sibling cgroup unless it's known that the current cgroup is accessed from the parent's children list in a single RCU critical section. This in turn forces all iterators to require the whole iteration to be enclosed in a single RCU critical section, which sometimes is too restrictive. This patch implements cgroup_next_sibling() which can reliably determine the next sibling regardless of the state of the current cgroup as long as it's accessible. It currently is impossible to determine the next sibling after dropping RCU read lock because the cgroup being iterated could be removed anytime and if RCU read lock is dropped, nothing guarantees its ->sibling.next pointer is accessible. A removed cgroup would continue to point to its next sibling for RCU accesses but stop receiving updates from the sibling. IOW, the next sibling could be removed and then complete its grace period while RCU read lock is dropped, making it unsafe to dereference ->sibling.next after dropping and re-acquiring RCU read lock. This can be solved by adding a way to traverse to the next sibling without dereferencing ->sibling.next. This patch adds a monotonically increasing cgroup serial number, cgroup->serial_nr, which guarantees that all cgroup->children lists are kept in increasing serial_nr order. A new function, cgroup_next_sibling(), is implemented, which, if CGRP_REMOVED is not set on the current cgroup, follows ->sibling.next; otherwise, traverses the parent's ->children list until it sees a sibling with higher ->serial_nr. This allows the function to always return the next sibling regardless of the state of the current cgroup without adding overhead in the fast path. Further patches will update the iterators to use cgroup_next_sibling() so that they allow dropping RCU read lock and blocking while iteration is in progress which in turn will be used to simplify controllers. v2: Typo fix as per Serge. Signed-off-by: Tejun Heo Acked-by: Serge E.
Hallyn diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 8d9f3c9..ee041a0 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -189,6 +189,14 @@ struct cgroup { struct dentry *dentry; /* cgroup fs entry, RCU protected */ /* + * Monotonically increasing unique serial number which defines a + * uniform order among all cgroups. It's guaranteed that all + * ->children lists are in the ascending order of ->serial_nr. + * It's used to allow interrupting and resuming iterations. + */ + u64 serial_nr; + + /* * This is a copy of dentry->d_name, and it's needed because * we can't use dentry->d_name in cgroup_path(). * @@ -675,6 +683,8 @@ static inline struct cgroup* task_cgroup(struct task_struct *task, return task_subsys_state(task, subsys_id)->cgroup; } +struct cgroup *cgroup_next_sibling(struct cgroup *pos); + /** * cgroup_for_each_child - iterate through children of a cgroup * @pos: the cgroup * to use as the loop cursor diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5019748..b87c7a5 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2976,6 +2976,55 @@ static void cgroup_enable_task_cg_lists(void) } /** + * cgroup_next_sibling - find the next sibling of a given cgroup + * @pos: the current cgroup + * + * This function returns the next sibling of @pos and should be called + * under RCU read lock. The only requirement is that @pos is accessible. + * The next sibling is guaranteed to be returned regardless of @pos's + * state. + */ +struct cgroup *cgroup_next_sibling(struct cgroup *pos) +{ + struct cgroup *next; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + /* + * @pos could already have been removed. Once a cgroup is removed, + * its ->sibling.next is no longer updated when its next sibling + * changes. As CGRP_REMOVED is set on removal which is fully + * serialized, if we see it unasserted, it's guaranteed that the + * next sibling hasn't finished its grace period even if it's + * already removed, and thus safe to dereference from this RCU + * critical section. If ->sibling.next is inaccessible, + * cgroup_is_removed() is guaranteed to be visible as %true here. + */ + if (likely(!cgroup_is_removed(pos))) { + next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling); + if (&next->sibling != &pos->parent->children) + return next; + return NULL; + } + + /* + * Can't dereference the next pointer. Each cgroup is given a + * monotonically increasing unique serial number and always + * appended to the sibling list, so the next one can be found by + * walking the parent's children until we see a cgroup with higher + * serial number than @pos's. + * + * While this path can be slow, it's taken only when either the + * current cgroup is removed or iteration and removal race. 
+ */ + list_for_each_entry_rcu(next, &pos->parent->children, sibling) + if (next->serial_nr > pos->serial_nr) + return next; + return NULL; +} +EXPORT_SYMBOL_GPL(cgroup_next_sibling); + +/** * cgroup_next_descendant_pre - find the next descendant for pre-order walk * @pos: the current position (%NULL to initiate traversal) * @cgroup: cgroup whose descendants to walk @@ -4137,6 +4186,7 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp) static long cgroup_create(struct cgroup *parent, struct dentry *dentry, umode_t mode) { + static atomic64_t serial_nr_cursor = ATOMIC64_INIT(0); struct cgroup *cgrp; struct cgroup_name *name; struct cgroupfs_root *root = parent->root; @@ -4217,6 +4267,14 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, goto err_free_all; lockdep_assert_held(&dentry->d_inode->i_mutex); + /* + * Assign a monotonically increasing serial number. With the list + * appending below, it guarantees that sibling cgroups are always + * sorted in the ascending serial number order on the parent's + * ->children. + */ + cgrp->serial_nr = atomic64_inc_return(&serial_nr_cursor); + /* allocation complete, commit to creation */ list_add_tail(&cgrp->allcg_node, &root->allcg_list); list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); @@ -4304,6 +4362,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) * removed. This makes future css_tryget() and child creation * attempts fail thus maintaining the removal conditions verified * above. + * + * Note that CGRP_REMOVED clearing is depended upon by + * cgroup_next_sibling() to resume iteration after dropping RCU + * read lock. See cgroup_next_sibling() for details. */ for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; -- cgit v0.10.2 From 75501a6d59e989e5c286716e5b3b66ace4660e83 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 24 May 2013 10:55:38 +0900 Subject: cgroup: update iterators to use cgroup_next_sibling() This patch converts cgroup_for_each_child(), cgroup_next_descendant_pre/post() and thus cgroup_for_each_descendant_pre/post() to use cgroup_next_sibling() instead of manually dereferencing ->sibling.next. The only reason the iterators couldn't allow dropping RCU read lock while iteration is in progress was because they couldn't determine the next sibling safely once RCU read lock is dropped. Using cgroup_next_sibling() removes that problem and enables all iterators to allow dropping RCU read lock in the middle. Comments are updated accordingly. This makes the iterators easier to use and will simplify controllers. Note that @cgroup argument is renamed to @cgrp in cgroup_for_each_child() because it conflicts with "struct cgroup" used in the new macro body. Signed-off-by: Tejun Heo Acked-by: Serge E. Hallyn Reviewed-by: Michal Hocko diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ee041a0..d0ad379 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -688,9 +688,9 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos); /** * cgroup_for_each_child - iterate through children of a cgroup * @pos: the cgroup * to use as the loop cursor - * @cgroup: cgroup whose children to walk + * @cgrp: cgroup whose children to walk * - * Walk @cgroup's children. Must be called under rcu_read_lock(). A child + * Walk @cgrp's children. Must be called under rcu_read_lock().
A child * cgroup which hasn't finished ->css_online() or already has finished * ->css_offline() may show up during traversal and it's each subsystem's * responsibility to verify that each @pos is alive. @@ -698,9 +698,15 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos); * If a subsystem synchronizes against the parent in its ->css_online() and * before starting iterating, a cgroup which finished ->css_online() is * guaranteed to be visible in the future iterations. + * + * It is allowed to temporarily drop RCU read lock during iteration. The + * caller is responsible for ensuring that @pos remains accessible until + * the start of the next iteration by, for example, bumping the css refcnt. */ -#define cgroup_for_each_child(pos, cgroup) \ - list_for_each_entry_rcu(pos, &(cgroup)->children, sibling) +#define cgroup_for_each_child(pos, cgrp) \ + for ((pos) = list_first_or_null_rcu(&(cgrp)->children, \ + struct cgroup, sibling); \ + (pos); (pos) = cgroup_next_sibling((pos))) struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, struct cgroup *cgroup); @@ -759,6 +765,10 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos); * Alternatively, a subsystem may choose to use a single global lock to * synchronize ->css_online() and ->css_offline() against tree-walking * operations. + * + * It is allowed to temporarily drop RCU read lock during iteration. The + * caller is responsible for ensuring that @pos remains accessible until + * the start of the next iteration by, for example, bumping the css refcnt. */ #define cgroup_for_each_descendant_pre(pos, cgroup) \ for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos); \ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b87c7a5..fefc41c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -3031,6 +3031,11 @@ EXPORT_SYMBOL_GPL(cgroup_next_sibling); * * To be used by cgroup_for_each_descendant_pre(). Find the next * descendant to visit for pre-order traversal of @cgroup's descendants. + * + * While this function requires RCU read locking, it doesn't require the + * whole traversal to be contained in a single RCU critical section. This + * function will return the correct next descendant as long as both @pos + * and @cgroup are accessible and @pos is a descendant of @cgroup. */ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, struct cgroup *cgroup) @@ -3050,11 +3055,9 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, /* no child, visit my or the closest ancestor's next sibling */ while (pos != cgroup) { - next = list_entry_rcu(pos->sibling.next, struct cgroup, - sibling); - if (&next->sibling != &pos->parent->children) + next = cgroup_next_sibling(pos); + if (next) return next; - pos = pos->parent; } @@ -3069,6 +3072,11 @@ EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre); * Return the rightmost descendant of @pos. If there's no descendant, * @pos is returned. This can be used during pre-order traversal to skip * subtree of @pos. + * + * While this function requires RCU read locking, it doesn't require the + * whole traversal to be contained in a single RCU critical section. This + * function will return the correct rightmost descendant as long as @pos is + * accessible. */ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos) { @@ -3108,6 +3116,11 @@ static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos) * * To be used by cgroup_for_each_descendant_post(). Find the next * descendant to visit for post-order traversal of @cgroup's descendants. 
+ * + * While this function requires RCU read locking, it doesn't require the + * whole traversal to be contained in a single RCU critical section. This + * function will return the correct next descendant as long as both @pos + * and @cgroup are accessible and @pos is a descendant of @cgroup. */ struct cgroup *cgroup_next_descendant_post(struct cgroup *pos, struct cgroup *cgroup) @@ -3123,8 +3136,8 @@ struct cgroup *cgroup_next_descendant_post(struct cgroup *pos, } /* if there's an unvisited sibling, visit its leftmost descendant */ - next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling); - if (&next->sibling != &pos->parent->children) + next = cgroup_next_sibling(pos); + if (next) return cgroup_leftmost_descendant(next); /* no sibling left, visit parent */ -- cgit v0.10.2 From d591fb56618f4f93160b477dfa25bbb1e31b0e85 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 24 May 2013 10:55:38 +0900 Subject: device_cgroup: simplify cgroup tree walk in propagate_exception() During a config change, propagate_exception() needs to traverse the subtree to update config on the subtree. Because such config updates need to allocate memory, it couldn't directly use cgroup_for_each_descendant_pre() which required the whole iteration to be contained in a single RCU read critical section. To work around the limitation, propagate_exception() built a linked list of descendant cgroups while read-locking RCU and then walked the list afterwards, which is safe as the whole iteration is protected by devcgroup_mutex. This works but is cumbersome. With the recent updates, cgroup iterators now allow dropping RCU read lock while iteration is in progress making this workaround no longer necessary. This patch replaces dev_cgroup->propagate_pending list and get_online_devcg() with direct cgroup_for_each_descendant_pre() walk. Signed-off-by: Tejun Heo Cc: Aristeu Rozanski Acked-by: Serge E. Hallyn Reviewed-by: Michal Hocko diff --git a/security/device_cgroup.c b/security/device_cgroup.c index dd0dc57..e8aad69 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -49,8 +49,6 @@ struct dev_cgroup { struct cgroup_subsys_state css; struct list_head exceptions; enum devcg_behavior behavior; - /* temporary list for pending propagation operations */ - struct list_head propagate_pending; }; static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s) @@ -241,7 +239,6 @@ static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup) if (!dev_cgroup) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&dev_cgroup->exceptions); - INIT_LIST_HEAD(&dev_cgroup->propagate_pending); dev_cgroup->behavior = DEVCG_DEFAULT_NONE; return &dev_cgroup->css; @@ -445,34 +442,6 @@ static void revalidate_active_exceptions(struct dev_cgroup *devcg) } /** - * get_online_devcg - walks the cgroup tree and fills a list with the online - * groups - * @root: cgroup used as starting point - * @online: list that will be filled with online groups - * - * Must be called with devcgroup_mutex held. Grabs RCU lock. - * Because devcgroup_mutex is held, no devcg will become online or offline - * during the tree walk (see devcgroup_online, devcgroup_offline) - * A separated list is needed because propagate_behavior() and - * propagate_exception() need to allocate memory and can block. 
- */ -static void get_online_devcg(struct cgroup *root, struct list_head *online) -{ - struct cgroup *pos; - struct dev_cgroup *devcg; - - lockdep_assert_held(&devcgroup_mutex); - - rcu_read_lock(); - cgroup_for_each_descendant_pre(pos, root) { - devcg = cgroup_to_devcgroup(pos); - if (is_devcg_online(devcg)) - list_add_tail(&devcg->propagate_pending, online); - } - rcu_read_unlock(); -} - -/** * propagate_exception - propagates a new exception to the children * @devcg_root: device cgroup that added a new exception * @ex: new exception to be propagated * @@ -482,15 +451,24 @@ static void get_online_devcg(struct cgroup *root, struct list_head *online) static int propagate_exception(struct dev_cgroup *devcg_root, struct dev_exception_item *ex) { - struct cgroup *root = devcg_root->css.cgroup; - struct dev_cgroup *devcg, *parent, *tmp; + struct cgroup *root = devcg_root->css.cgroup, *pos; int rc = 0; - LIST_HEAD(pending); - get_online_devcg(root, &pending); + rcu_read_lock(); - list_for_each_entry_safe(devcg, tmp, &pending, propagate_pending) { - parent = cgroup_to_devcgroup(devcg->css.cgroup->parent); + cgroup_for_each_descendant_pre(pos, root) { + struct dev_cgroup *devcg = cgroup_to_devcgroup(pos); + + /* + * Because devcgroup_mutex is held, no devcg will become + * online or offline during the tree walk (see on/offline + * methods), and online ones are safe to access outside RCU + * read lock without bumping refcnt. + */ + if (!is_devcg_online(devcg)) + continue; + + rcu_read_unlock(); /* * in case both root's behavior and devcg is allow, a new @@ -512,8 +490,10 @@ static int propagate_exception(struct dev_cgroup *devcg_root, } revalidate_active_exceptions(devcg); - list_del_init(&devcg->propagate_pending); + rcu_read_lock(); } + + rcu_read_unlock(); return rc; } -- cgit v0.10.2 From f12dc020149fad7087e119e54cffea668272bf7d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Jun 2013 19:13:02 -0700 Subject: cgroup: mark "tasks" cgroup file as insane Some resources controlled by cgroup aren't per-task and cgroup core allowing threads of a single thread_group to be in different cgroups forced memcg to explicitly find the group leader and use it. This is gonna be nasty when transitioning to unified hierarchy and in general we don't want and won't support granularity finer than processes. Mark "tasks" with CFTYPE_INSANE. Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Johannes Weiner Cc: Michal Hocko Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: cgroups@vger.kernel.org Cc: Vivek Goyal diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fefc41c..1e0f445 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4037,6 +4037,7 @@ static int cgroup_clone_children_write(struct cgroup *cgrp, static struct cftype files[] = { { .name = "tasks", + .flags = CFTYPE_INSANE, /* use "procs" instead */ .open = cgroup_tasks_open, .write_u64 = cgroup_tasks_write, .release = cgroup_pidlist_release, -- cgit v0.10.2 From cc5943a7816ba6c00639837a62131386619548dc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Jun 2013 19:13:55 -0700 Subject: cgroup: mark "notify_on_release" and "release_agent" cgroup files insane The empty cgroup notification mechanism currently implemented in cgroup is tragically outdated. Forking and execing a userland process stopped being a viable notification mechanism more than a decade ago. We're gonna have a saner mechanism. Let's make it clear that this abomination is going away. Mark "notify_on_release" and "release_agent" with CFTYPE_INSANE.
Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1e0f445..b3bb8a3 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4052,6 +4052,7 @@ static struct cftype files[] = { }, { .name = "notify_on_release", + .flags = CFTYPE_INSANE, .read_u64 = cgroup_read_notify_on_release, .write_u64 = cgroup_write_notify_on_release, }, @@ -4073,7 +4074,7 @@ static struct cftype files[] = { }, { .name = "release_agent", - .flags = CFTYPE_ONLY_ON_ROOT, + .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, .read_seq_string = cgroup_release_agent_show, .write_string = cgroup_release_agent_write, .max_write_len = PATH_MAX, -- cgit v0.10.2 From d5c56ced775f6bdc32b689b01c9c4f9b66e18610 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Jun 2013 19:14:34 -0700 Subject: cgroup: clean up the cftype array for the base cgroup files * Rename it from files[] (really?) to cgroup_base_files[]. * Drop CGROUP_FILE_GENERIC_PREFIX which was defined as "cgroup." and used inconsistently. Just use "cgroup." directly. * Collect insane files at the end. Note that only the insane ones are missing "cgroup." prefix. This patch doesn't introduce any functional changes. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b3bb8a3..bc53d50 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4029,35 +4029,16 @@ static int cgroup_clone_children_write(struct cgroup *cgrp, return 0; } -/* - * for the common functions, 'private' gives the type of file - */ -/* for hysterical raisins, we can't put this on the older files */ -#define CGROUP_FILE_GENERIC_PREFIX "cgroup." -static struct cftype files[] = { +static struct cftype cgroup_base_files[] = { { - .name = "tasks", - .flags = CFTYPE_INSANE, /* use "procs" instead */ - .open = cgroup_tasks_open, - .write_u64 = cgroup_tasks_write, - .release = cgroup_pidlist_release, - .mode = S_IRUGO | S_IWUSR, - }, - { - .name = CGROUP_FILE_GENERIC_PREFIX "procs", + .name = "cgroup.procs", .open = cgroup_procs_open, .write_u64 = cgroup_procs_write, .release = cgroup_pidlist_release, .mode = S_IRUGO | S_IWUSR, }, { - .name = "notify_on_release", - .flags = CFTYPE_INSANE, - .read_u64 = cgroup_read_notify_on_release, - .write_u64 = cgroup_write_notify_on_release, - }, - { - .name = CGROUP_FILE_GENERIC_PREFIX "event_control", + .name = "cgroup.event_control", .write_string = cgroup_write_event_control, .mode = S_IWUGO, }, @@ -4072,6 +4053,26 @@ static struct cftype files[] = { .flags = CFTYPE_ONLY_ON_ROOT, .read_seq_string = cgroup_sane_behavior_show, }, + + /* + * Historical crazy stuff. These don't have "cgroup." prefix and + * don't exist if sane_behavior. If you're depending on these, be + * prepared to be burned. 
+ */ + { + .name = "tasks", + .flags = CFTYPE_INSANE, /* use "procs" instead */ + .open = cgroup_tasks_open, + .write_u64 = cgroup_tasks_write, + .release = cgroup_pidlist_release, + .mode = S_IRUGO | S_IWUSR, + }, + { + .name = "notify_on_release", + .flags = CFTYPE_INSANE, + .read_u64 = cgroup_read_notify_on_release, + .write_u64 = cgroup_write_notify_on_release, + }, { .name = "release_agent", .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, @@ -4095,7 +4096,7 @@ static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files, struct cgroup_subsys *ss; if (base_files) { - err = cgroup_addrm_files(cgrp, NULL, files, true); + err = cgroup_addrm_files(cgrp, NULL, cgroup_base_files, true); if (err < 0) return err; } -- cgit v0.10.2 From 3fc3db9a3ae0ce108badf31a4a00e41b4236f5fc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:48 -0700 Subject: cgroup: remove now unused css_depth() Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index d0ad379..5830592 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -848,7 +848,6 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg, /* Get id and depth of css */ unsigned short css_id(struct cgroup_subsys_state *css); -unsigned short css_depth(struct cgroup_subsys_state *css); struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id); #else /* !CONFIG_CGROUPS */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index bc53d50..d4a329f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -5227,18 +5227,6 @@ unsigned short css_id(struct cgroup_subsys_state *css) } EXPORT_SYMBOL_GPL(css_id); -unsigned short css_depth(struct cgroup_subsys_state *css) -{ - struct css_id *cssid; - - cssid = rcu_dereference_check(css->id, css_refcnt(css)); - - if (cssid) - return cssid->depth; - return 0; -} -EXPORT_SYMBOL_GPL(css_depth); - /** * css_is_ancestor - test "root" css is an ancestor of "child" * @child: the css to be tested. -- cgit v0.10.2 From 5abb8855734fd7b3fa7f91c13916d0e35d99763c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:49 -0700 Subject: cgroup: consistently use @cset for struct css_set variables cgroup.c uses @cg for most struct css_set variables, which in itself could be a bit confusing, but made much worse by the fact that there are places which use @cg for struct cgroup variables. compare_css_sets() epitomizes this confusion - @[old_]cg are struct css_set while @cg[12] are struct cgroup. It's not like the whole deal with cgroup, css_set and cg_cgroup_link isn't already confusing enough. Let's give it some sanity by uniformly using @cset for all struct css_set variables. * s/cg/cset/ for all css_set variables. * s/oldcg/old_cset/ s/oldcgrp/old_cgrp/. The same for the ones prefixed with "new". * s/cg/cgrp/ for cgroup variables in compare_css_sets(). * s/css/cset/ for the cgroup variable in task_cgroup_from_root(). * Whiteline adjustments. This patch is purely cosmetic. 
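To illustrate the convention (sketch, not from the diff):

	/* before: @cg is ambiguous - elsewhere "cg" names a struct cgroup */
	struct css_set *cg = tsk->cgroups;

	/* after: @cset unambiguously names a struct css_set */
	struct css_set *cset = tsk->cgroups;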
Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d4a329f..1f5a4e1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -376,30 +376,32 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) * compiled into their kernel but not actually in use */ static int use_task_css_set_links __read_mostly; -static void __put_css_set(struct css_set *cg, int taskexit) +static void __put_css_set(struct css_set *cset, int taskexit) { struct cg_cgroup_link *link; struct cg_cgroup_link *saved_link; + /* * Ensure that the refcount doesn't hit zero while any readers * can see it. Similar to atomic_dec_and_lock(), but for an * rwlock */ - if (atomic_add_unless(&cg->refcount, -1, 1)) + if (atomic_add_unless(&cset->refcount, -1, 1)) return; write_lock(&css_set_lock); - if (!atomic_dec_and_test(&cg->refcount)) { + if (!atomic_dec_and_test(&cset->refcount)) { write_unlock(&css_set_lock); return; } /* This css_set is dead. unlink it and release cgroup refcounts */ - hash_del(&cg->hlist); + hash_del(&cset->hlist); css_set_count--; - list_for_each_entry_safe(link, saved_link, &cg->cg_links, + list_for_each_entry_safe(link, saved_link, &cset->cg_links, cg_link_list) { struct cgroup *cgrp = link->cgrp; + list_del(&link->cg_link_list); list_del(&link->cgrp_link_list); @@ -421,45 +423,45 @@ static void __put_css_set(struct css_set *cg, int taskexit) } write_unlock(&css_set_lock); - kfree_rcu(cg, rcu_head); + kfree_rcu(cset, rcu_head); } /* * refcounted get/put for css_set objects */ -static inline void get_css_set(struct css_set *cg) +static inline void get_css_set(struct css_set *cset) { - atomic_inc(&cg->refcount); + atomic_inc(&cset->refcount); } -static inline void put_css_set(struct css_set *cg) +static inline void put_css_set(struct css_set *cset) { - __put_css_set(cg, 0); + __put_css_set(cset, 0); } -static inline void put_css_set_taskexit(struct css_set *cg) +static inline void put_css_set_taskexit(struct css_set *cset) { - __put_css_set(cg, 1); + __put_css_set(cset, 1); } /* * compare_css_sets - helper function for find_existing_css_set(). - * @cg: candidate css_set being tested - * @old_cg: existing css_set for a task + * @cset: candidate css_set being tested + * @old_cset: existing css_set for a task * @new_cgrp: cgroup that's being entered by the task * @template: desired set of css pointers in css_set (pre-calculated) * * Returns true if "cg" matches "old_cg" except for the hierarchy * which "new_cgrp" belongs to, for which it should match "new_cgrp". */ -static bool compare_css_sets(struct css_set *cg, - struct css_set *old_cg, +static bool compare_css_sets(struct css_set *cset, + struct css_set *old_cset, struct cgroup *new_cgrp, struct cgroup_subsys_state *template[]) { struct list_head *l1, *l2; - if (memcmp(template, cg->subsys, sizeof(cg->subsys))) { + if (memcmp(template, cset->subsys, sizeof(cset->subsys))) { /* Not all subsystems matched */ return false; } @@ -473,28 +475,28 @@ static bool compare_css_sets(struct css_set *cg, * candidates. */ - l1 = &cg->cg_links; - l2 = &old_cg->cg_links; + l1 = &cset->cg_links; + l2 = &old_cset->cg_links; while (1) { struct cg_cgroup_link *cgl1, *cgl2; - struct cgroup *cg1, *cg2; + struct cgroup *cgrp1, *cgrp2; l1 = l1->next; l2 = l2->next; /* See if we reached the end - both lists are equal length. 
*/ - if (l1 == &cg->cg_links) { - BUG_ON(l2 != &old_cg->cg_links); + if (l1 == &cset->cg_links) { + BUG_ON(l2 != &old_cset->cg_links); break; } else { - BUG_ON(l2 == &old_cg->cg_links); + BUG_ON(l2 == &old_cset->cg_links); } /* Locate the cgroups associated with these links. */ cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list); cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list); - cg1 = cgl1->cgrp; - cg2 = cgl2->cgrp; + cgrp1 = cgl1->cgrp; + cgrp2 = cgl2->cgrp; /* Hierarchies should be linked in the same order. */ - BUG_ON(cg1->root != cg2->root); + BUG_ON(cgrp1->root != cgrp2->root); /* * If this hierarchy is the hierarchy of the cgroup @@ -503,11 +505,11 @@ static bool compare_css_sets(struct css_set *cg, * hierarchy, then this css_set should point to the * same cgroup as the old css_set. */ - if (cg1->root == new_cgrp->root) { - if (cg1 != new_cgrp) + if (cgrp1->root == new_cgrp->root) { + if (cgrp1 != new_cgrp) return false; } else { - if (cg1 != cg2) + if (cgrp1 != cgrp2) return false; } } @@ -527,14 +529,13 @@ static bool compare_css_sets(struct css_set *cg, * template: location in which to build the desired set of subsystem * state objects for the new cgroup group */ -static struct css_set *find_existing_css_set( - struct css_set *oldcg, - struct cgroup *cgrp, - struct cgroup_subsys_state *template[]) +static struct css_set *find_existing_css_set(struct css_set *old_cset, + struct cgroup *cgrp, + struct cgroup_subsys_state *template[]) { int i; struct cgroupfs_root *root = cgrp->root; - struct css_set *cg; + struct css_set *cset; unsigned long key; /* @@ -551,17 +552,17 @@ static struct css_set *find_existing_css_set( } else { /* Subsystem is not in this hierarchy, so we * don't want to change the subsystem state */ - template[i] = oldcg->subsys[i]; + template[i] = old_cset->subsys[i]; } } key = css_set_hash(template); - hash_for_each_possible(css_set_table, cg, hlist, key) { - if (!compare_css_sets(cg, oldcg, cgrp, template)) + hash_for_each_possible(css_set_table, cset, hlist, key) { + if (!compare_css_sets(cset, old_cset, cgrp, template)) continue; /* This css_set matches what we need */ - return cg; + return cset; } /* No existing cgroup group matched */ @@ -603,18 +604,18 @@ static int allocate_cg_links(int count, struct list_head *tmp) /** * link_css_set - a helper function to link a css_set to a cgroup * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links() - * @cg: the css_set to be linked + * @cset: the css_set to be linked * @cgrp: the destination cgroup */ static void link_css_set(struct list_head *tmp_cg_links, - struct css_set *cg, struct cgroup *cgrp) + struct css_set *cset, struct cgroup *cgrp) { struct cg_cgroup_link *link; BUG_ON(list_empty(tmp_cg_links)); link = list_first_entry(tmp_cg_links, struct cg_cgroup_link, cgrp_link_list); - link->cg = cg; + link->cg = cset; link->cgrp = cgrp; atomic_inc(&cgrp->count); list_move(&link->cgrp_link_list, &cgrp->css_sets); @@ -622,7 +623,7 @@ static void link_css_set(struct list_head *tmp_cg_links, * Always add links to the tail of the list so that the list * is sorted by order of hierarchy creation */ - list_add_tail(&link->cg_link_list, &cg->cg_links); + list_add_tail(&link->cg_link_list, &cset->cg_links); } /* @@ -632,10 +633,10 @@ static void link_css_set(struct list_head *tmp_cg_links, * substituted into the appropriate hierarchy. 
Must be called with * cgroup_mutex held */ -static struct css_set *find_css_set( - struct css_set *oldcg, struct cgroup *cgrp) +static struct css_set *find_css_set(struct css_set *old_cset, + struct cgroup *cgrp) { - struct css_set *res; + struct css_set *cset; struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; struct list_head tmp_cg_links; @@ -646,40 +647,40 @@ static struct css_set *find_css_set( /* First see if we already have a cgroup group that matches * the desired set */ read_lock(&css_set_lock); - res = find_existing_css_set(oldcg, cgrp, template); - if (res) - get_css_set(res); + cset = find_existing_css_set(old_cset, cgrp, template); + if (cset) + get_css_set(cset); read_unlock(&css_set_lock); - if (res) - return res; + if (cset) + return cset; - res = kmalloc(sizeof(*res), GFP_KERNEL); - if (!res) + cset = kmalloc(sizeof(*cset), GFP_KERNEL); + if (!cset) return NULL; /* Allocate all the cg_cgroup_link objects that we'll need */ if (allocate_cg_links(root_count, &tmp_cg_links) < 0) { - kfree(res); + kfree(cset); return NULL; } - atomic_set(&res->refcount, 1); - INIT_LIST_HEAD(&res->cg_links); - INIT_LIST_HEAD(&res->tasks); - INIT_HLIST_NODE(&res->hlist); + atomic_set(&cset->refcount, 1); + INIT_LIST_HEAD(&cset->cg_links); + INIT_LIST_HEAD(&cset->tasks); + INIT_HLIST_NODE(&cset->hlist); /* Copy the set of subsystem state objects generated in * find_existing_css_set() */ - memcpy(res->subsys, template, sizeof(res->subsys)); + memcpy(cset->subsys, template, sizeof(cset->subsys)); write_lock(&css_set_lock); /* Add reference counts and links from the new css_set. */ - list_for_each_entry(link, &oldcg->cg_links, cg_link_list) { + list_for_each_entry(link, &old_cset->cg_links, cg_link_list) { struct cgroup *c = link->cgrp; if (c->root == cgrp->root) c = cgrp; - link_css_set(&tmp_cg_links, res, c); + link_css_set(&tmp_cg_links, cset, c); } BUG_ON(!list_empty(&tmp_cg_links)); @@ -687,12 +688,12 @@ static struct css_set *find_css_set( css_set_count++; /* Add this cgroup group to the hash table */ - key = css_set_hash(res->subsys); - hash_add(css_set_table, &res->hlist, key); + key = css_set_hash(cset->subsys); + hash_add(css_set_table, &cset->hlist, key); write_unlock(&css_set_lock); - return res; + return cset; } /* @@ -702,7 +703,7 @@ static struct css_set *find_css_set( static struct cgroup *task_cgroup_from_root(struct task_struct *task, struct cgroupfs_root *root) { - struct css_set *css; + struct css_set *cset; struct cgroup *res = NULL; BUG_ON(!mutex_is_locked(&cgroup_mutex)); @@ -712,12 +713,12 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, * task can't change groups, so the only thing that can happen * is that it exits and its css is set back to init_css_set. 
*/ - css = task->cgroups; - if (css == &init_css_set) { + cset = task->cgroups; + if (cset == &init_css_set) { res = &root->top_cgroup; } else { struct cg_cgroup_link *link; - list_for_each_entry(link, &css->cg_links, cg_link_list) { + list_for_each_entry(link, &cset->cg_links, cg_link_list) { struct cgroup *c = link->cgrp; if (c->root == root) { res = c; @@ -1608,7 +1609,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, struct cgroupfs_root *existing_root; const struct cred *cred; int i; - struct css_set *cg; + struct css_set *cset; BUG_ON(sb->s_root != NULL); @@ -1666,8 +1667,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, /* Link the top cgroup in this hierarchy into all * the css_set objects */ write_lock(&css_set_lock); - hash_for_each(css_set_table, i, cg, hlist) - link_css_set(&tmp_cg_links, cg, root_cgrp); + hash_for_each(css_set_table, i, cset, hlist) + link_css_set(&tmp_cg_links, cset, root_cgrp); write_unlock(&css_set_lock); free_cg_links(&tmp_cg_links); @@ -1944,10 +1945,11 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_size); * * Must be called with cgroup_mutex and threadgroup locked. */ -static void cgroup_task_migrate(struct cgroup *oldcgrp, - struct task_struct *tsk, struct css_set *newcg) +static void cgroup_task_migrate(struct cgroup *old_cgrp, + struct task_struct *tsk, + struct css_set *new_cset) { - struct css_set *oldcg; + struct css_set *old_cset; /* * We are synchronized through threadgroup_lock() against PF_EXITING @@ -1955,25 +1957,25 @@ static void cgroup_task_migrate(struct cgroup *oldcgrp, * css_set to init_css_set and dropping the old one. */ WARN_ON_ONCE(tsk->flags & PF_EXITING); - oldcg = tsk->cgroups; + old_cset = tsk->cgroups; task_lock(tsk); - rcu_assign_pointer(tsk->cgroups, newcg); + rcu_assign_pointer(tsk->cgroups, new_cset); task_unlock(tsk); /* Update the css_set linked lists if we're using them */ write_lock(&css_set_lock); if (!list_empty(&tsk->cg_list)) - list_move(&tsk->cg_list, &newcg->tasks); + list_move(&tsk->cg_list, &new_cset->tasks); write_unlock(&css_set_lock); /* - * We just gained a reference on oldcg by taking it from the task. As - * trading it for newcg is protected by cgroup_mutex, we're safe to drop - * it here; it will be freed under RCU. + * We just gained a reference on old_cset by taking it from the + * task. As trading it for new_cset is protected by cgroup_mutex, + * we're safe to drop it here; it will be freed under RCU. 
*/ - set_bit(CGRP_RELEASABLE, &oldcgrp->flags); - put_css_set(oldcg); + set_bit(CGRP_RELEASABLE, &old_cgrp->flags); + put_css_set(old_cset); } /** @@ -2925,7 +2927,7 @@ static void cgroup_advance_iter(struct cgroup *cgrp, { struct list_head *l = it->cg_link; struct cg_cgroup_link *link; - struct css_set *cg; + struct css_set *cset; /* Advance to the next non-empty css_set */ do { @@ -2935,10 +2937,10 @@ static void cgroup_advance_iter(struct cgroup *cgrp, return; } link = list_entry(l, struct cg_cgroup_link, cgrp_link_list); - cg = link->cg; - } while (list_empty(&cg->tasks)); + cset = link->cg; + } while (list_empty(&cset->tasks)); it->cg_link = l; - it->task = cg->tasks.next; + it->task = cset->tasks.next; } /* @@ -4516,7 +4518,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) struct cgroup_subsys_state *css; int i, ret; struct hlist_node *tmp; - struct css_set *cg; + struct css_set *cset; unsigned long key; /* check name and function validity */ @@ -4583,17 +4585,17 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) * this is all done under the css_set_lock. */ write_lock(&css_set_lock); - hash_for_each_safe(css_set_table, i, tmp, cg, hlist) { + hash_for_each_safe(css_set_table, i, tmp, cset, hlist) { /* skip entries that we already rehashed */ - if (cg->subsys[ss->subsys_id]) + if (cset->subsys[ss->subsys_id]) continue; /* remove existing entry */ - hash_del(&cg->hlist); + hash_del(&cset->hlist); /* set new value */ - cg->subsys[ss->subsys_id] = css; + cset->subsys[ss->subsys_id] = css; /* recompute hash and restore entry */ - key = css_set_hash(cg->subsys); - hash_add(css_set_table, &cg->hlist, key); + key = css_set_hash(cset->subsys); + hash_add(css_set_table, &cset->hlist, key); } write_unlock(&css_set_lock); @@ -4653,13 +4655,13 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) */ write_lock(&css_set_lock); list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) { - struct css_set *cg = link->cg; + struct css_set *cset = link->cg; unsigned long key; - hash_del(&cg->hlist); - cg->subsys[ss->subsys_id] = NULL; - key = css_set_hash(cg->subsys); - hash_add(css_set_table, &cg->hlist, key); + hash_del(&cset->hlist); + cset->subsys[ss->subsys_id] = NULL; + key = css_set_hash(cset->subsys); + hash_add(css_set_table, &cset->hlist, key); } write_unlock(&css_set_lock); @@ -5006,7 +5008,7 @@ void cgroup_post_fork(struct task_struct *child) */ void cgroup_exit(struct task_struct *tsk, int run_callbacks) { - struct css_set *cg; + struct css_set *cset; int i; /* @@ -5023,7 +5025,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) /* Reassign the task to the init_css_set. 
*/ task_lock(tsk); - cg = tsk->cgroups; + cset = tsk->cgroups; tsk->cgroups = &init_css_set; if (run_callbacks && need_forkexit_callback) { @@ -5036,7 +5038,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) if (ss->exit) { struct cgroup *old_cgrp = - rcu_dereference_raw(cg->subsys[i])->cgroup; + rcu_dereference_raw(cset->subsys[i])->cgroup; struct cgroup *cgrp = task_cgroup(tsk, i); ss->exit(cgrp, old_cgrp, tsk); } @@ -5044,7 +5046,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) } task_unlock(tsk); - put_css_set_taskexit(cg); + put_css_set_taskexit(cset); } static void check_for_release(struct cgroup *cgrp) @@ -5453,12 +5455,12 @@ static int current_css_set_cg_links_read(struct cgroup *cont, struct seq_file *seq) { struct cg_cgroup_link *link; - struct css_set *cg; + struct css_set *cset; read_lock(&css_set_lock); rcu_read_lock(); - cg = rcu_dereference(current->cgroups); - list_for_each_entry(link, &cg->cg_links, cg_link_list) { + cset = rcu_dereference(current->cgroups); + list_for_each_entry(link, &cset->cg_links, cg_link_list) { struct cgroup *c = link->cgrp; const char *name; @@ -5483,11 +5485,11 @@ static int cgroup_css_links_read(struct cgroup *cont, read_lock(&css_set_lock); list_for_each_entry(link, &cont->css_sets, cgrp_link_list) { - struct css_set *cg = link->cg; + struct css_set *cset = link->cg; struct task_struct *task; int count = 0; - seq_printf(seq, "css_set %p\n", cg); - list_for_each_entry(task, &cg->tasks, cg_list) { + seq_printf(seq, "css_set %p\n", cset); + list_for_each_entry(task, &cset->tasks, cg_list) { if (count++ > MAX_TASKS_SHOWN_PER_CSS) { seq_puts(seq, " ...\n"); break; -- cgit v0.10.2 From 69d0206c793a17431eacee2694ee7a4b25df76b7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:50 -0700 Subject: cgroup: bring some sanity to naming around cg_cgroup_link cgroups and css_sets are mapped M:N and this M:N mapping is represented by struct cg_cgroup_link which forms linked lists on both sides. The naming around this mapping is already confusing and struct cg_cgroup_link exacerbates the situation quite a bit. From cgroup side, it starts off ->css_sets and runs through ->cgrp_link_list. From css_set side, it starts off ->cg_links and runs through ->cg_link_list. This is rather reversed as cgrp_link_list is used to iterate css_sets and cg_link_list cgroups. Also, this is the only place which is still using the confusing "cg" for css_sets. This patch cleans it up a bit. * s/cgroup->css_sets/cgroup->cset_links/ s/css_set->cg_links/css_set->cgrp_links/ s/cgroup_iter->cg_link/cgroup_iter->cset_link/ * s/cg_cgroup_link/cgrp_cset_link/ * s/cgrp_cset_link->cg/cgrp_cset_link->cset/ s/cgrp_cset_link->cgrp_link_list/cgrp_cset_link->cset_link/ s/cgrp_cset_link->cg_link_list/cgrp_cset_link->cgrp_link/ * s/init_css_set_link/init_cgrp_cset_link/ s/free_cg_links/free_cgrp_cset_links/ s/allocate_cg_links/allocate_cgrp_cset_links/ * s/cgl[12]/link[12]/ in compare_css_sets() * s/saved_link/tmp_link/ s/tmp/tmp_links/ and a couple similar adjustments. * Comment and whiteline adjustments. After the changes, we have list_for_each_entry(link, &cont->cset_links, cset_link) { struct css_set *cset = link->cset; instead of list_for_each_entry(link, &cont->css_sets, cgrp_link_list) { struct css_set *cset = link->cg; This patch is purely cosmetic. v2: Fix broken sentences in the patch description.
Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 5830592..0e32855 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -215,10 +215,10 @@ struct cgroup { struct cgroupfs_root *root; /* - * List of cg_cgroup_links pointing at css_sets with - * tasks in this cgroup. Protected by css_set_lock + * List of cgrp_cset_links pointing at css_sets with tasks in this + * cgroup. Protected by css_set_lock. */ - struct list_head css_sets; + struct list_head cset_links; struct list_head allcg_node; /* cgroupfs_root->allcg_list */ struct list_head cft_q_node; /* used during cftype add/rm */ @@ -365,11 +365,10 @@ struct css_set { struct list_head tasks; /* - * List of cg_cgroup_link objects on link chains from - * cgroups referenced from this css_set. Protected by - * css_set_lock + * List of cgrp_cset_links pointing at cgroups referenced from this + * css_set. Protected by css_set_lock. */ - struct list_head cg_links; + struct list_head cgrp_links; /* * Set of subsystem states, one for each subsystem. This array @@ -792,7 +791,7 @@ struct cgroup *cgroup_next_descendant_post(struct cgroup *pos, /* A cgroup_iter should be treated as an opaque object */ struct cgroup_iter { - struct list_head *cg_link; + struct list_head *cset_link; struct list_head *task; }; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1f5a4e1..ef97bd0c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -315,20 +315,24 @@ static void cgroup_release_agent(struct work_struct *work); static DECLARE_WORK(release_agent_work, cgroup_release_agent); static void check_for_release(struct cgroup *cgrp); -/* Link structure for associating css_set objects with cgroups */ -struct cg_cgroup_link { - /* - * List running through cg_cgroup_links associated with a - * cgroup, anchored on cgroup->css_sets - */ - struct list_head cgrp_link_list; - struct cgroup *cgrp; - /* - * List running through cg_cgroup_links pointing at a - * single css_set object, anchored on css_set->cg_links - */ - struct list_head cg_link_list; - struct css_set *cg; +/* + * A cgroup can be associated with multiple css_sets as different tasks may + * belong to different cgroups on different hierarchies. In the other + * direction, a css_set is naturally associated with multiple cgroups. + * This M:N relationship is represented by the following link structure + * which exists for each association and allows traversing the associations + * from both sides. 
+ */ +struct cgrp_cset_link { + /* the cgroup and css_set this link associates */ + struct cgroup *cgrp; + struct css_set *cset; + + /* list of cgrp_cset_links anchored at cgrp->cset_links */ + struct list_head cset_link; + + /* list of cgrp_cset_links anchored at css_set->cgrp_links */ + struct list_head cgrp_link; }; /* The default css_set - used by init and its children prior to any @@ -339,7 +343,7 @@ struct cg_cgroup_link { */ static struct css_set init_css_set; -static struct cg_cgroup_link init_css_set_link; +static struct cgrp_cset_link init_cgrp_cset_link; static int cgroup_init_idr(struct cgroup_subsys *ss, struct cgroup_subsys_state *css); @@ -378,8 +382,7 @@ static int use_task_css_set_links __read_mostly; static void __put_css_set(struct css_set *cset, int taskexit) { - struct cg_cgroup_link *link; - struct cg_cgroup_link *saved_link; + struct cgrp_cset_link *link, *tmp_link; /* * Ensure that the refcount doesn't hit zero while any readers @@ -398,12 +401,11 @@ static void __put_css_set(struct css_set *cset, int taskexit) hash_del(&cset->hlist); css_set_count--; - list_for_each_entry_safe(link, saved_link, &cset->cg_links, - cg_link_list) { + list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) { struct cgroup *cgrp = link->cgrp; - list_del(&link->cg_link_list); - list_del(&link->cgrp_link_list); + list_del(&link->cset_link); + list_del(&link->cgrp_link); /* * We may not be holding cgroup_mutex, and if cgrp->count is @@ -475,26 +477,26 @@ static bool compare_css_sets(struct css_set *cset, * candidates. */ - l1 = &cset->cg_links; - l2 = &old_cset->cg_links; + l1 = &cset->cgrp_links; + l2 = &old_cset->cgrp_links; while (1) { - struct cg_cgroup_link *cgl1, *cgl2; + struct cgrp_cset_link *link1, *link2; struct cgroup *cgrp1, *cgrp2; l1 = l1->next; l2 = l2->next; /* See if we reached the end - both lists are equal length. */ - if (l1 == &cset->cg_links) { - BUG_ON(l2 != &old_cset->cg_links); + if (l1 == &cset->cgrp_links) { + BUG_ON(l2 != &old_cset->cgrp_links); break; } else { - BUG_ON(l2 == &old_cset->cg_links); + BUG_ON(l2 == &old_cset->cgrp_links); } /* Locate the cgroups associated with these links. */ - cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list); - cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list); - cgrp1 = cgl1->cgrp; - cgrp2 = cgl2->cgrp; + link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link); + link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link); + cgrp1 = link1->cgrp; + cgrp2 = link2->cgrp; /* Hierarchies should be linked in the same order. */ BUG_ON(cgrp1->root != cgrp2->root); @@ -569,61 +571,64 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, return NULL; } -static void free_cg_links(struct list_head *tmp) +static void free_cgrp_cset_links(struct list_head *links_to_free) { - struct cg_cgroup_link *link; - struct cg_cgroup_link *saved_link; + struct cgrp_cset_link *link, *tmp_link; - list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) { - list_del(&link->cgrp_link_list); + list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) { + list_del(&link->cset_link); kfree(link); } } -/* - * allocate_cg_links() allocates "count" cg_cgroup_link structures - * and chains them on tmp through their cgrp_link_list fields. 
Returns 0 on - * success or a negative error +/** + * allocate_cgrp_cset_links - allocate cgrp_cset_links + * @count: the number of links to allocate + * @tmp_links: list_head the allocated links are put on + * + * Allocate @count cgrp_cset_link structures and chain them on @tmp_links + * through ->cset_link. Returns 0 on success or -errno. */ -static int allocate_cg_links(int count, struct list_head *tmp) +static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) { - struct cg_cgroup_link *link; + struct cgrp_cset_link *link; int i; - INIT_LIST_HEAD(tmp); + + INIT_LIST_HEAD(tmp_links); + for (i = 0; i < count; i++) { link = kmalloc(sizeof(*link), GFP_KERNEL); if (!link) { - free_cg_links(tmp); + free_cgrp_cset_links(tmp_links); return -ENOMEM; } - list_add(&link->cgrp_link_list, tmp); + list_add(&link->cset_link, tmp_links); } return 0; } /** * link_css_set - a helper function to link a css_set to a cgroup - * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links() + * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links() * @cset: the css_set to be linked * @cgrp: the destination cgroup */ -static void link_css_set(struct list_head *tmp_cg_links, - struct css_set *cset, struct cgroup *cgrp) +static void link_css_set(struct list_head *tmp_links, struct css_set *cset, + struct cgroup *cgrp) { - struct cg_cgroup_link *link; + struct cgrp_cset_link *link; - BUG_ON(list_empty(tmp_cg_links)); - link = list_first_entry(tmp_cg_links, struct cg_cgroup_link, - cgrp_link_list); - link->cg = cset; + BUG_ON(list_empty(tmp_links)); + link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link); + link->cset = cset; link->cgrp = cgrp; atomic_inc(&cgrp->count); - list_move(&link->cgrp_link_list, &cgrp->css_sets); + list_move(&link->cset_link, &cgrp->cset_links); /* * Always add links to the tail of the list so that the list * is sorted by order of hierarchy creation */ - list_add_tail(&link->cg_link_list, &cset->cg_links); + list_add_tail(&link->cgrp_link, &cset->cgrp_links); } /* @@ -638,10 +643,8 @@ static struct css_set *find_css_set(struct css_set *old_cset, { struct css_set *cset; struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; - - struct list_head tmp_cg_links; - - struct cg_cgroup_link *link; + struct list_head tmp_links; + struct cgrp_cset_link *link; unsigned long key; /* First see if we already have a cgroup group that matches @@ -659,14 +662,14 @@ static struct css_set *find_css_set(struct css_set *old_cset, if (!cset) return NULL; - /* Allocate all the cg_cgroup_link objects that we'll need */ - if (allocate_cg_links(root_count, &tmp_cg_links) < 0) { + /* Allocate all the cgrp_cset_link objects that we'll need */ + if (allocate_cgrp_cset_links(root_count, &tmp_links) < 0) { kfree(cset); return NULL; } atomic_set(&cset->refcount, 1); - INIT_LIST_HEAD(&cset->cg_links); + INIT_LIST_HEAD(&cset->cgrp_links); INIT_LIST_HEAD(&cset->tasks); INIT_HLIST_NODE(&cset->hlist); @@ -676,14 +679,15 @@ static struct css_set *find_css_set(struct css_set *old_cset, write_lock(&css_set_lock); /* Add reference counts and links from the new css_set. 
*/ - list_for_each_entry(link, &old_cset->cg_links, cg_link_list) { + list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; + if (c->root == cgrp->root) c = cgrp; - link_css_set(&tmp_cg_links, cset, c); + link_css_set(&tmp_links, cset, c); } - BUG_ON(!list_empty(&tmp_cg_links)); + BUG_ON(!list_empty(&tmp_links)); css_set_count++; @@ -717,9 +721,11 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, if (cset == &init_css_set) { res = &root->top_cgroup; } else { - struct cg_cgroup_link *link; - list_for_each_entry(link, &cset->cg_links, cg_link_list) { + struct cgrp_cset_link *link; + + list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; + if (c->root == root) { res = c; break; @@ -1405,7 +1411,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) INIT_LIST_HEAD(&cgrp->sibling); INIT_LIST_HEAD(&cgrp->children); INIT_LIST_HEAD(&cgrp->files); - INIT_LIST_HEAD(&cgrp->css_sets); + INIT_LIST_HEAD(&cgrp->cset_links); INIT_LIST_HEAD(&cgrp->allcg_node); INIT_LIST_HEAD(&cgrp->release_list); INIT_LIST_HEAD(&cgrp->pidlists); @@ -1604,7 +1610,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, BUG_ON(!root); if (root == opts.new_root) { /* We used the new root structure, so this is a new hierarchy */ - struct list_head tmp_cg_links; + struct list_head tmp_links; struct cgroup *root_cgrp = &root->top_cgroup; struct cgroupfs_root *existing_root; const struct cred *cred; @@ -1636,7 +1642,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, * that's us. The worst that can happen is that we * have some link structures left over */ - ret = allocate_cg_links(css_set_count, &tmp_cg_links); + ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); if (ret) goto unlock_drop; @@ -1646,7 +1652,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, ret = rebind_subsystems(root, root->subsys_mask); if (ret == -EBUSY) { - free_cg_links(&tmp_cg_links); + free_cgrp_cset_links(&tmp_links); goto unlock_drop; } /* @@ -1668,10 +1674,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, * the css_set objects */ write_lock(&css_set_lock); hash_for_each(css_set_table, i, cset, hlist) - link_css_set(&tmp_cg_links, cset, root_cgrp); + link_css_set(&tmp_links, cset, root_cgrp); write_unlock(&css_set_lock); - free_cg_links(&tmp_cg_links); + free_cgrp_cset_links(&tmp_links); BUG_ON(!list_empty(&root_cgrp->children)); BUG_ON(root->number_of_cgroups != 1); @@ -1722,9 +1728,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, static void cgroup_kill_sb(struct super_block *sb) { struct cgroupfs_root *root = sb->s_fs_info; struct cgroup *cgrp = &root->top_cgroup; + struct cgrp_cset_link *link, *tmp_link; int ret; - struct cg_cgroup_link *link; - struct cg_cgroup_link *saved_link; BUG_ON(!root); @@ -1740,15 +1745,14 @@ static void cgroup_kill_sb(struct super_block *sb) { BUG_ON(ret); /* - * Release all the links from css_sets to this hierarchy's + * Release all the links from cset_links to this hierarchy's * root cgroup */ write_lock(&css_set_lock); - list_for_each_entry_safe(link, saved_link, &cgrp->css_sets, - cgrp_link_list) { - list_del(&link->cg_link_list); - list_del(&link->cgrp_link_list); + list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { + list_del(&link->cset_link); + list_del(&link->cgrp_link); kfree(link); } write_unlock(&css_set_lock); @@ -2908,12 +2912,11 @@ int cgroup_rm_cftypes(struct 
cgroup_subsys *ss, struct cftype *cfts) int cgroup_task_count(const struct cgroup *cgrp) { int count = 0; - struct cg_cgroup_link *link; + struct cgrp_cset_link *link; read_lock(&css_set_lock); - list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) { - count += atomic_read(&link->cg->refcount); - } + list_for_each_entry(link, &cgrp->cset_links, cset_link) + count += atomic_read(&link->cset->refcount); read_unlock(&css_set_lock); return count; } @@ -2922,24 +2925,23 @@ int cgroup_task_count(const struct cgroup *cgrp) * Advance a list_head iterator. The iterator should be positioned at * the start of a css_set */ -static void cgroup_advance_iter(struct cgroup *cgrp, - struct cgroup_iter *it) +static void cgroup_advance_iter(struct cgroup *cgrp, struct cgroup_iter *it) { - struct list_head *l = it->cg_link; - struct cg_cgroup_link *link; + struct list_head *l = it->cset_link; + struct cgrp_cset_link *link; struct css_set *cset; /* Advance to the next non-empty css_set */ do { l = l->next; - if (l == &cgrp->css_sets) { - it->cg_link = NULL; + if (l == &cgrp->cset_links) { + it->cset_link = NULL; return; } - link = list_entry(l, struct cg_cgroup_link, cgrp_link_list); - cset = link->cg; + link = list_entry(l, struct cgrp_cset_link, cset_link); + cset = link->cset; } while (list_empty(&cset->tasks)); - it->cg_link = l; + it->cset_link = l; it->task = cset->tasks.next; } @@ -3160,7 +3162,7 @@ void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) cgroup_enable_task_cg_lists(); read_lock(&css_set_lock); - it->cg_link = &cgrp->css_sets; + it->cset_link = &cgrp->cset_links; cgroup_advance_iter(cgrp, it); } @@ -3169,16 +3171,16 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, { struct task_struct *res; struct list_head *l = it->task; - struct cg_cgroup_link *link; + struct cgrp_cset_link *link; /* If the iterator cg is NULL, we have no tasks */ - if (!it->cg_link) + if (!it->cset_link) return NULL; res = list_entry(l, struct task_struct, cg_list); /* Advance iterator to find next entry */ l = l->next; - link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list); - if (l == &link->cg->tasks) { + link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link); + if (l == &link->cset->tasks) { /* We reached the end of this task list - move on to * the next cg_cgroup_link */ cgroup_advance_iter(cgrp, it); @@ -4625,7 +4627,7 @@ EXPORT_SYMBOL_GPL(cgroup_load_subsys); */ void cgroup_unload_subsys(struct cgroup_subsys *ss) { - struct cg_cgroup_link *link; + struct cgrp_cset_link *link; BUG_ON(ss->module == NULL); @@ -4654,8 +4656,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) * in loading, we need to pay our respects to the hashtable gods. 
*/ write_lock(&css_set_lock); - list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) { - struct css_set *cset = link->cg; + list_for_each_entry(link, &dummytop->cset_links, cset_link) { + struct css_set *cset = link->cset; unsigned long key; hash_del(&cset->hlist); @@ -4688,7 +4690,7 @@ int __init cgroup_init_early(void) { int i; atomic_set(&init_css_set.refcount, 1); - INIT_LIST_HEAD(&init_css_set.cg_links); + INIT_LIST_HEAD(&init_css_set.cgrp_links); INIT_LIST_HEAD(&init_css_set.tasks); INIT_HLIST_NODE(&init_css_set.hlist); css_set_count = 1; @@ -4696,12 +4698,10 @@ int __init cgroup_init_early(void) root_count = 1; init_task.cgroups = &init_css_set; - init_css_set_link.cg = &init_css_set; - init_css_set_link.cgrp = dummytop; - list_add(&init_css_set_link.cgrp_link_list, - &rootnode.top_cgroup.css_sets); - list_add(&init_css_set_link.cg_link_list, - &init_css_set.cg_links); + init_cgrp_cset_link.cset = &init_css_set; + init_cgrp_cset_link.cgrp = dummytop; + list_add(&init_cgrp_cset_link.cset_link, &rootnode.top_cgroup.cset_links); + list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys *ss = subsys[i]; @@ -5454,13 +5454,13 @@ static int current_css_set_cg_links_read(struct cgroup *cont, struct cftype *cft, struct seq_file *seq) { - struct cg_cgroup_link *link; + struct cgrp_cset_link *link; struct css_set *cset; read_lock(&css_set_lock); rcu_read_lock(); cset = rcu_dereference(current->cgroups); - list_for_each_entry(link, &cset->cg_links, cg_link_list) { + list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; const char *name; @@ -5481,11 +5481,11 @@ static int cgroup_css_links_read(struct cgroup *cont, struct cftype *cft, struct seq_file *seq) { - struct cg_cgroup_link *link; + struct cgrp_cset_link *link; read_lock(&css_set_lock); - list_for_each_entry(link, &cont->css_sets, cgrp_link_list) { - struct css_set *cset = link->cg; + list_for_each_entry(link, &cont->cset_links, cset_link) { + struct css_set *cset = link->cset; struct task_struct *task; int count = 0; seq_printf(seq, "css_set %p\n", cset); -- cgit v0.10.2 From f4f4be2bd2889c69a8698edef8dbfd4f6759aa87 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:51 -0700 Subject: cgroup: use kzalloc() instead of kmalloc() There's no point in using kmalloc() instead of the clearing variant for trivial stuff. We can live dangerously elsewhere. Use kzalloc() instead and drop 0 inits. While at it, do trivial code reorganization in cgroup_file_open(). This patch doesn't introduce any functional changes. v2: I was caught in the very distant past where list_del() didn't poison and the initial version converted list_del()s to list_del_init()s too. Li and Kent took me out of the stasis chamber. 
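The conversion pattern is mechanical; a minimal before/after sketch (the struct and field names here are hypothetical, not taken from the patch):

	#include <linux/slab.h>

	struct pidlist_like {
		int use_count;
		void *data;
	};

	/* before: kmalloc() plus explicit zeroing of individual fields */
	static struct pidlist_like *alloc_old(void)
	{
		struct pidlist_like *l = kmalloc(sizeof(*l), GFP_KERNEL);

		if (!l)
			return NULL;
		l->use_count = 0;
		l->data = NULL;
		return l;
	}

	/* after: kzalloc() returns zeroed memory, so the 0/NULL inits go away */
	static struct pidlist_like *alloc_new(void)
	{
		return kzalloc(sizeof(struct pidlist_like), GFP_KERNEL);
	}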
Signed-off-by: Tejun Heo Cc: Kent Overstreet Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ef97bd0c..d86a847 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -597,7 +597,7 @@ static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) INIT_LIST_HEAD(tmp_links); for (i = 0; i < count; i++) { - link = kmalloc(sizeof(*link), GFP_KERNEL); + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { free_cgrp_cset_links(tmp_links); return -ENOMEM; @@ -658,7 +658,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, if (cset) return cset; - cset = kmalloc(sizeof(*cset), GFP_KERNEL); + cset = kzalloc(sizeof(*cset), GFP_KERNEL); if (!cset) return NULL; @@ -2475,10 +2475,12 @@ static int cgroup_file_open(struct inode *inode, struct file *file) cft = __d_cft(file->f_dentry); if (cft->read_map || cft->read_seq_string) { - struct cgroup_seqfile_state *state = - kzalloc(sizeof(*state), GFP_USER); + struct cgroup_seqfile_state *state; + + state = kzalloc(sizeof(*state), GFP_USER); if (!state) return -ENOMEM; + state->cft = cft; state->cgroup = __d_cgrp(file->f_dentry->d_parent); file->f_op = &cgroup_seqfile_operations; @@ -3511,7 +3513,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, } } /* entry not found; create a new one */ - l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL); + l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL); if (!l) { mutex_unlock(&cgrp->pidlist_mutex); return l; @@ -3520,8 +3522,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, down_write(&l->mutex); l->key.type = type; l->key.ns = get_pid_ns(ns); - l->use_count = 0; /* don't increment here */ - l->list = NULL; l->owner = cgrp; list_add(&l->links, &cgrp->pidlists); mutex_unlock(&cgrp->pidlist_mutex); -- cgit v0.10.2 From 5de0107e634ce862f16360139709d9d3a656463e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:52 -0700 Subject: cgroup: clean up css_[try]get() and css_put() * __css_get() isn't used by anyone. Fold it into css_get(). * Add proper function comments to all css reference functions. This patch is purely cosmetic. v2: Typo fix as per Li. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0e32855..a494636 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -94,33 +94,31 @@ enum { CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ }; -/* Caller must verify that the css is not for root cgroup */ -static inline void __css_get(struct cgroup_subsys_state *css, int count) -{ - atomic_add(count, &css->refcnt); -} - -/* - * Call css_get() to hold a reference on the css; it can be used - * for a reference obtained via: - * - an existing ref-counted reference to the css - * - task->cgroups for a locked task +/** + * css_get - obtain a reference on the specified css + * @css: target css + * + * The caller must already have a reference. */ - static inline void css_get(struct cgroup_subsys_state *css) { /* We don't need to reference count the root state */ if (!(css->flags & CSS_ROOT)) - __css_get(css, 1); + atomic_inc(&css->refcnt); } -/* - * Call css_tryget() to take a reference on a css if your existing - * (known-valid) reference isn't already ref-counted. Returns false if - * the css has been destroyed. 
- */ - extern bool __css_tryget(struct cgroup_subsys_state *css); + +/** + * css_tryget - try to obtain a reference on the specified css + * @css: target css + * + * Obtain a reference on @css if it's alive. The caller naturally needs to + * ensure that @css is accessible but doesn't have to be holding a + * reference on it - IOW, RCU protected access is good enough for this + * function. Returns %true if a reference count was successfully obtained; + * %false otherwise. + */ static inline bool css_tryget(struct cgroup_subsys_state *css) { if (css->flags & CSS_ROOT) @@ -128,12 +126,14 @@ static inline bool css_tryget(struct cgroup_subsys_state *css) return __css_tryget(css); } -/* - * css_put() should be called to release a reference taken by - * css_get() or css_tryget() - */ - extern void __css_put(struct cgroup_subsys_state *css); + +/** + * css_put - put a css reference + * @css: target css + * + * Put a reference obtained via css_get() and css_tryget(). + */ static inline void css_put(struct cgroup_subsys_state *css) { if (!(css->flags & CSS_ROOT)) -- cgit v0.10.2 From 54766d4a1d3d6f84ff8fa475cd8f165c0a0000eb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:53 -0700 Subject: cgroup: rename CGRP_REMOVED to CGRP_DEAD We will add another flag indicating that the cgroup is in the process of being killed. REMOVING / REMOVED is more difficult to distinguish and cgroup_is_removing()/cgroup_is_removed() are a bit awkward. Also, later percpu_ref usage will involve "kill"ing the refcnt. s/CGRP_REMOVED/CGRP_DEAD/ s/cgroup_is_removed()/cgroup_is_dead() This patch is purely cosmetic. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index a494636..c86a93a 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -143,7 +143,7 @@ static inline void css_put(struct cgroup_subsys_state *css) /* bits in struct cgroup flags field */ enum { /* Control Group is dead */ - CGRP_REMOVED, + CGRP_DEAD, /* * Control Group has previously had a child cgroup or a task, * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d86a847..84efb34 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -226,9 +226,9 @@ static int css_refcnt(struct cgroup_subsys_state *css) } /* convenient tests for these bits */ -static inline bool cgroup_is_removed(const struct cgroup *cgrp) +static inline bool cgroup_is_dead(const struct cgroup *cgrp) { - return test_bit(CGRP_REMOVED, &cgrp->flags); + return test_bit(CGRP_DEAD, &cgrp->flags); } /** @@ -300,7 +300,7 @@ static inline struct cftype *__d_cft(struct dentry *dentry) static bool cgroup_lock_live_group(struct cgroup *cgrp) { mutex_lock(&cgroup_mutex); - if (cgroup_is_removed(cgrp)) { + if (cgroup_is_dead(cgrp)) { mutex_unlock(&cgroup_mutex); return false; } @@ -892,7 +892,7 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) if (S_ISDIR(inode->i_mode)) { struct cgroup *cgrp = dentry->d_fsdata; - BUG_ON(!(cgroup_is_removed(cgrp))); + BUG_ON(!(cgroup_is_dead(cgrp))); call_rcu(&cgrp->rcu_head, cgroup_free_rcu); } else { struct cfent *cfe = __d_cfe(dentry); @@ -2363,7 +2363,7 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf, struct cftype *cft = __d_cft(file->f_dentry); struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); - if (cgroup_is_removed(cgrp)) + if (cgroup_is_dead(cgrp)) return -ENODEV; if (cft->write) return cft->write(cgrp, cft, file, buf, nbytes, ppos); @@ -2408,7 +2408,7 @@ static 
ssize_t cgroup_file_read(struct file *file, char __user *buf, struct cftype *cft = __d_cft(file->f_dentry); struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); - if (cgroup_is_removed(cgrp)) + if (cgroup_is_dead(cgrp)) return -ENODEV; if (cft->read) @@ -2831,7 +2831,7 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss, mutex_lock(&inode->i_mutex); mutex_lock(&cgroup_mutex); - if (!cgroup_is_removed(cgrp)) + if (!cgroup_is_dead(cgrp)) cgroup_addrm_files(cgrp, ss, cfts, is_add); mutex_unlock(&cgroup_mutex); mutex_unlock(&inode->i_mutex); @@ -2999,14 +2999,14 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos) /* * @pos could already have been removed. Once a cgroup is removed, * its ->sibling.next is no longer updated when its next sibling - * changes. As CGRP_REMOVED is set on removal which is fully + * changes. As CGRP_DEAD is set on removal which is fully * serialized, if we see it unasserted, it's guaranteed that the * next sibling hasn't finished its grace period even if it's * already removed, and thus safe to dereference from this RCU * critical section. If ->sibling.next is inaccessible, - * cgroup_is_removed() is guaranteed to be visible as %true here. + * cgroup_is_dead() is guaranteed to be visible as %true here. */ - if (likely(!cgroup_is_removed(pos))) { + if (likely(!cgroup_is_dead(pos))) { next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling); if (&next->sibling != &pos->parent->children) return next; @@ -4383,7 +4383,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) * attempts fail thus maintaining the removal conditions verified * above. * - * Note that CGRP_REMVOED clearing is depended upon by + * Note that CGRP_DEAD assertion is depended upon by * cgroup_next_sibling() to resume iteration after dropping RCU * read lock. See cgroup_next_sibling() for details. */ @@ -4393,7 +4393,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) WARN_ON(atomic_read(&css->refcnt) < 0); atomic_add(CSS_DEACT_BIAS, &css->refcnt); } - set_bit(CGRP_REMOVED, &cgrp->flags); + set_bit(CGRP_DEAD, &cgrp->flags); /* tell subsystems to initate destruction */ for_each_subsys(cgrp->root, ss) @@ -5063,7 +5063,7 @@ static void check_for_release(struct cgroup *cgrp) int need_schedule_work = 0; raw_spin_lock(&release_list_lock); - if (!cgroup_is_removed(cgrp) && + if (!cgroup_is_dead(cgrp) && list_empty(&cgrp->release_list)) { list_add(&cgrp->release_list, &release_list); need_schedule_work = 1; @@ -5209,9 +5209,7 @@ __setup("cgroup_disable=", cgroup_disable); * Functons for CSS ID. */ -/* - *To get ID other than 0, this should be called when !cgroup_is_removed(). - */ +/* to get ID other than 0, this should be called when !cgroup_is_dead() */ unsigned short css_id(struct cgroup_subsys_state *css) { struct css_id *cssid; -- cgit v0.10.2 From ddd69148bdc45e5e3e55bfde3571daecd5a96d75 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:54 -0700 Subject: cgroup: drop unnecessary RCU dancing from __put_css_set() __put_css_set() does RCU read access on @cgrp across dropping @cgrp->count so that it can continue accessing @cgrp even if the count reached zero and destruction of the cgroup commenced. Given that both sides - __css_put() and cgroup_destroy_locked() - are cold paths, this is unnecessary. Just making cgroup_destroy_locked() grab css_set_lock while checking @cgrp->count is enough. Remove the RCU read locking from __put_css_set() and make cgroup_destroy_locked() read-lock css_set_lock when checking @cgrp->count. 
This will also allow removing @cgrp->count. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 84efb34..1a68241 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -407,19 +407,13 @@ static void __put_css_set(struct css_set *cset, int taskexit) list_del(&link->cset_link); list_del(&link->cgrp_link); - /* - * We may not be holding cgroup_mutex, and if cgrp->count is - * dropped to 0 the cgroup can be destroyed at any time, hence - * rcu_read_lock is used to keep it alive. - */ - rcu_read_lock(); + /* @cgrp can't go away while we're holding css_set_lock */ if (atomic_dec_and_test(&cgrp->count) && notify_on_release(cgrp)) { if (taskexit) set_bit(CGRP_RELEASABLE, &cgrp->flags); check_for_release(cgrp); } - rcu_read_unlock(); kfree(link); } @@ -4370,11 +4364,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) struct cgroup *parent = cgrp->parent; struct cgroup_event *event, *tmp; struct cgroup_subsys *ss; + bool empty; lockdep_assert_held(&d->d_inode->i_mutex); lockdep_assert_held(&cgroup_mutex); - if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) + /* + * css_set_lock prevents @cgrp from being removed while + * __put_css_set() is in progress. + */ + read_lock(&css_set_lock); + empty = !atomic_read(&cgrp->count) && list_empty(&cgrp->children); + read_unlock(&css_set_lock); + if (!empty) return -EBUSY; /* @@ -5051,8 +5053,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) static void check_for_release(struct cgroup *cgrp) { - /* All of these checks rely on RCU to keep the cgroup - * structure alive */ if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) { /* -- cgit v0.10.2 From 6f3d828f0fb7fdaffc6f32cb8a1cb7fcf8824598 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Jun 2013 21:04:55 -0700 Subject: cgroup: remove cgroup->count and use cgroup->count tracks the number of css_sets associated with the cgroup and is used only to verify that no css_set is associated when the cgroup is being destroyed. It's superfluous as the destruction path can simply check whether cgroup->cset_links is empty instead. Drop cgroup->count and check ->cset_links directly from cgroup_destroy_locked(). Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c86a93a..81bfd02 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -169,12 +169,6 @@ struct cgroup { unsigned long flags; /* "unsigned long" so bitops work */ - /* - * count users of this cgroup.
>0 means busy, but doesn't - * necessarily indicate the number of tasks in the cgroup - */ - atomic_t count; - int id; /* ida allocated in-hierarchy ID */ /* diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1a68241..49bfd7b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -408,8 +408,7 @@ static void __put_css_set(struct css_set *cset, int taskexit) list_del(&link->cgrp_link); /* @cgrp can't go away while we're holding css_set_lock */ - if (atomic_dec_and_test(&cgrp->count) && - notify_on_release(cgrp)) { + if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) { if (taskexit) set_bit(CGRP_RELEASABLE, &cgrp->flags); check_for_release(cgrp); @@ -616,7 +615,6 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset, link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link); link->cset = cset; link->cgrp = cgrp; - atomic_inc(&cgrp->count); list_move(&link->cset_link, &cgrp->cset_links); /* * Always add links to the tail of the list so that the list @@ -4370,11 +4368,11 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) lockdep_assert_held(&cgroup_mutex); /* - * css_set_lock prevents @cgrp from being removed while - * __put_css_set() is in progress. + * css_set_lock synchronizes access to ->cset_links and prevents + * @cgrp from being removed while __put_css_set() is in progress. */ read_lock(&css_set_lock); - empty = !atomic_read(&cgrp->count) && list_empty(&cgrp->children); + empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children); read_unlock(&css_set_lock); if (!empty) return -EBUSY; @@ -5054,7 +5052,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) static void check_for_release(struct cgroup *cgrp) { if (cgroup_is_releasable(cgrp) && - !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) { + list_empty(&cgrp->cset_links) && list_empty(&cgrp->children)) { /* * Control Group is currently removeable. If it's not * already queued for a userspace notification, queue @@ -5422,11 +5420,6 @@ static void debug_css_free(struct cgroup *cont) kfree(cont->subsys[debug_subsys_id]); } -static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft) -{ - return atomic_read(&cont->count); -} - static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft) { return cgroup_task_count(cont); @@ -5508,10 +5501,6 @@ static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft) static struct cftype debug_files[] = { { - .name = "cgroup_refcount", - .read_u64 = cgroup_refcount_read, - }, - { .name = "taskcount", .read_u64 = debug_taskcount_read, }, -- cgit v0.10.2 From 455050d23e1bfc47ca98e943ad5b2f3a9bbe45fb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Jun 2013 19:27:41 -0700 Subject: cgroup: reorder the operations in cgroup_destroy_locked() This patch reorders the operations in cgroup_destroy_locked() such that the userland visible parts happen before css offlining and removal from the ->sibling list. This will be used to make css use percpu refcnt. While at it, split out CGRP_DEAD related comment from the refcnt deactivation one and correct / clarify how different guarantees are met. While this patch changes the specific order of operations, it shouldn't cause any noticeable behavior difference. 
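In outline, the reordered flow is as follows (a simplified sketch of the sequence described above; locking, error handling, and the per-subsystem loops are elided):

	/* sketch: the order of operations after this patch */
	static int cgroup_destroy_outline(struct cgroup *cgrp)
	{
		/* 1. verify the cgroup is empty; bail with -EBUSY otherwise */
		/* 2. deactivate each css refcnt so new css_tryget() fails */
		/* 3. mark @cgrp CGRP_DEAD to stop migration and child creation */
		/* 4. userland-visible teardown: drop @cgrp from release_list,
		 *    remove its directory, and unregister cgroup_events */
		/* 5. only then offline the css's, put the base refs, and
		 *    unlink @cgrp from ->sibling */
		return 0;
	}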
Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 49bfd7b..5a1ddec 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4379,13 +4379,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) /* * Block new css_tryget() by deactivating refcnt and mark @cgrp - * removed. This makes future css_tryget() and child creation - * attempts fail thus maintaining the removal conditions verified - * above. - * - * Note that CGRP_DEAD assertion is depended upon by - * cgroup_next_sibling() to resume iteration after dropping RCU - * read lock. See cgroup_next_sibling() for details. + * removed. This makes future css_tryget() attempts fail which we + * guarantee to ->css_offline() callbacks. */ for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; @@ -4393,8 +4388,41 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) WARN_ON(atomic_read(&css->refcnt) < 0); atomic_add(CSS_DEACT_BIAS, &css->refcnt); } + + /* + * Mark @cgrp dead. This prevents further task migration and child + * creation by disabling cgroup_lock_live_group(). Note that + * CGRP_DEAD assertion is depended upon by cgroup_next_sibling() to + * resume iteration after dropping RCU read lock. See + * cgroup_next_sibling() for details. + */ set_bit(CGRP_DEAD, &cgrp->flags); + /* CGRP_DEAD is set, remove from ->release_list for the last time */ + raw_spin_lock(&release_list_lock); + if (!list_empty(&cgrp->release_list)) + list_del_init(&cgrp->release_list); + raw_spin_unlock(&release_list_lock); + + /* + * Remove @cgrp directory. The removal puts the base ref but we + * aren't quite done with @cgrp yet, so hold onto it. + */ + dget(d); + cgroup_d_remove_dir(d); + + /* + * Unregister events and notify userspace. + * Notify userspace about cgroup removing only after rmdir of cgroup + * directory to avoid race between userspace and kernelspace. + */ + spin_lock(&cgrp->event_list_lock); + list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) { + list_del_init(&event->list); + schedule_work(&event->remove); + } + spin_unlock(&cgrp->event_list_lock); + /* tell subsystems to initate destruction */ for_each_subsys(cgrp->root, ss) offline_css(ss, cgrp); @@ -4409,34 +4437,15 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) for_each_subsys(cgrp->root, ss) css_put(cgrp->subsys[ss->subsys_id]); - raw_spin_lock(&release_list_lock); - if (!list_empty(&cgrp->release_list)) - list_del_init(&cgrp->release_list); - raw_spin_unlock(&release_list_lock); - /* delete this cgroup from parent->children */ list_del_rcu(&cgrp->sibling); list_del_init(&cgrp->allcg_node); - dget(d); - cgroup_d_remove_dir(d); dput(d); set_bit(CGRP_RELEASABLE, &parent->flags); check_for_release(parent); - /* - * Unregister events and notify userspace. - * Notify userspace about cgroup removing only after rmdir of cgroup - * directory to avoid race between userspace and kernelspace. - */ - spin_lock(&cgrp->event_list_lock); - list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) { - list_del_init(&event->list); - schedule_work(&event->remove); - } - spin_unlock(&cgrp->event_list_lock); - return 0; } -- cgit v0.10.2 From ea15f8ccdb430af1e8bc9b4e19a230eb4c356777 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Jun 2013 19:27:42 -0700 Subject: cgroup: split cgroup destruction into two steps Split cgroup_destroy_locked() into two steps and put the latter half into cgroup_offline_fn() which is executed from a work item. 
The latter half is responsible for offlining the css's, removing the cgroup from internal lists, and propagating release notification to the parent. The separation is to allow using percpu refcnt for css. Note that this allows for other cgroup operations to happen between the first and second halves of destruction, including creating a new cgroup with the same name. As the target cgroup is marked DEAD in the first half and cgroup internals don't care about the names of cgroups, this should be fine. A comment explaining this will be added by the next patch which implements the actual percpu refcnting. As RCU freeing is guaranteed to happen after the second step of destruction, we can use the same work item for both. This patch renames cgroup->free_work to ->destroy_work and uses it for both purposes. INIT_WORK() is now performed right before queueing the work item. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 81bfd02..e345d8b 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -233,7 +233,7 @@ struct cgroup { /* For RCU-protected deletion */ struct rcu_head rcu_head; - struct work_struct free_work; + struct work_struct destroy_work; /* List of events which userspace want to receive */ struct list_head event_list; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5a1ddec..df68147 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -208,6 +208,7 @@ static struct cgroup_name root_cgroup_name = { .name = "/" }; */ static int need_forkexit_callback __read_mostly; +static void cgroup_offline_fn(struct work_struct *work); static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys, struct cftype cfts[], bool is_add); @@ -830,7 +831,7 @@ static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry) static void cgroup_free_fn(struct work_struct *work) { - struct cgroup *cgrp = container_of(work, struct cgroup, free_work); + struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); struct cgroup_subsys *ss; mutex_lock(&cgroup_mutex); @@ -875,7 +876,8 @@ static void cgroup_free_rcu(struct rcu_head *head) { struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head); - schedule_work(&cgrp->free_work); + INIT_WORK(&cgrp->destroy_work, cgroup_free_fn); + schedule_work(&cgrp->destroy_work); } static void cgroup_diput(struct dentry *dentry, struct inode *inode) @@ -1407,7 +1409,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) INIT_LIST_HEAD(&cgrp->allcg_node); INIT_LIST_HEAD(&cgrp->release_list); INIT_LIST_HEAD(&cgrp->pidlists); - INIT_WORK(&cgrp->free_work, cgroup_free_fn); mutex_init(&cgrp->pidlist_mutex); INIT_LIST_HEAD(&cgrp->event_list); spin_lock_init(&cgrp->event_list_lock); @@ -2991,12 +2992,13 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos) /* * @pos could already have been removed. Once a cgroup is removed, * its ->sibling.next is no longer updated when its next sibling - * changes. As CGRP_DEAD is set on removal which is fully - * serialized, if we see it unasserted, it's guaranteed that the - * next sibling hasn't finished its grace period even if it's - * already removed, and thus safe to dereference from this RCU - * critical section. If ->sibling.next is inaccessible, - * cgroup_is_dead() is guaranteed to be visible as %true here. + * changes. 
As CGRP_DEAD assertion is serialized and happens + * before the cgroup is taken off the ->sibling list, if we see it + * unasserted, it's guaranteed that the next sibling hasn't + * finished its grace period even if it's already removed, and thus + * safe to dereference from this RCU critical section. If + * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed + * to be visible as %true here. */ if (likely(!cgroup_is_dead(pos))) { next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling); @@ -4359,7 +4361,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) __releases(&cgroup_mutex) __acquires(&cgroup_mutex) { struct dentry *d = cgrp->dentry; - struct cgroup *parent = cgrp->parent; struct cgroup_event *event, *tmp; struct cgroup_subsys *ss; bool empty; @@ -4423,6 +4424,21 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) } spin_unlock(&cgrp->event_list_lock); + INIT_WORK(&cgrp->destroy_work, cgroup_offline_fn); + schedule_work(&cgrp->destroy_work); + + return 0; +}; + +static void cgroup_offline_fn(struct work_struct *work) +{ + struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); + struct cgroup *parent = cgrp->parent; + struct dentry *d = cgrp->dentry; + struct cgroup_subsys *ss; + + mutex_lock(&cgroup_mutex); + /* tell subsystems to initate destruction */ for_each_subsys(cgrp->root, ss) offline_css(ss, cgrp); @@ -4446,7 +4462,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) set_bit(CGRP_RELEASABLE, &parent->flags); check_for_release(parent); - return 0; + mutex_unlock(&cgroup_mutex); } static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) -- cgit v0.10.2 From d3daf28da16a30af95bfb303189a634a87606725 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Jun 2013 19:39:16 -0700 Subject: cgroup: use percpu refcnt for cgroup_subsys_states A css (cgroup_subsys_state) is how each cgroup is represented to a controller. As such, it can be used in hot paths across the various subsystems different controllers are associated with. One of the common operations is reference counting, which up until now has been implemented using a global atomic counter and can have significant adverse impact on scalability. For example, css refcnt can be gotten and put multiple times by blkcg for each IO request. For highops configurations which try to do as much per-cpu as possible, the global frequent refcnting can be very expensive. In general, given the various and hugely diverse paths css's end up being used from, we need to make it cheap and highly scalable. In its usage, css refcnting isn't very different from module refcnting. This patch converts css refcnting to use the recently added percpu_ref. css_get/tryget/put() directly maps to the matching percpu_ref operations and the deactivation logic is no longer necessary as percpu_ref already has refcnt killing. The only complication is that as the refcnt is per-cpu, percpu_ref_kill() in itself doesn't ensure that further tryget operations will fail, which we need to guarantee before invoking ->css_offline()'s. This is resolved collecting kill confirmation using percpu_ref_kill_and_confirm() and initiating the offline phase of destruction after all css refcnt's are confirmed to be seen as killed on all CPUs. The previous patches already splitted destruction into two phases, so percpu_ref_kill_and_confirm() can be hooked up easily. This patch removes css_refcnt() which is used for rcu dereference sanity check in css_id(). 
While we can add a percpu refcnt API to ask the same question, css_id() itself is scheduled to be removed fairly soon, so let's not bother with it. Just drop the sanity check and use rcu_dereference_raw() instead. v2: - init_cgroup_css() was calling percpu_ref_init() without checking the return value. This causes two problems - the obvious lack of error handling and percpu_ref_init() being called from cgroup_init_subsys() before the allocators are up, which triggers warnings but doesn't cause actual problems as the refcnt isn't used for roots anyway. Fix both by moving percpu_ref_init() to cgroup_create(). - The base references were put too early by percpu_ref_kill_and_confirm() and cgroup_offline_fn() put the refs one extra time. This wasn't noticeable because css's go through another RCU grace period before being freed. Update cgroup_destroy_locked() to grab an extra reference before killing the refcnts. This problem was noticed by Kent. Signed-off-by: Tejun Heo Reviewed-by: Kent Overstreet Acked-by: Li Zefan Cc: Michal Hocko Cc: Mike Snitzer Cc: Vivek Goyal Cc: "Alasdair G. Kergon" Cc: Jens Axboe Cc: Mikulas Patocka Cc: Glauber Costa diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e345d8b..b7bd4be 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef CONFIG_CGROUPS @@ -72,13 +73,8 @@ struct cgroup_subsys_state { */ struct cgroup *cgroup; - /* - * State maintained by the cgroup system to allow subsystems - * to be "busy". Should be accessed via css_get(), - * css_tryget() and css_put(). - */ - - atomic_t refcnt; + /* reference count - access via css_[try]get() and css_put() */ + struct percpu_ref refcnt; unsigned long flags; /* ID for this css, if possible */ @@ -104,11 +100,9 @@ static inline void css_get(struct cgroup_subsys_state *css) { /* We don't need to reference count the root state */ if (!(css->flags & CSS_ROOT)) - atomic_inc(&css->refcnt); + percpu_ref_get(&css->refcnt); } -extern bool __css_tryget(struct cgroup_subsys_state *css); - /** * css_tryget - try to obtain a reference on the specified css * @css: target css @@ -123,11 +117,9 @@ static inline bool css_tryget(struct cgroup_subsys_state *css) { if (css->flags & CSS_ROOT) return true; - return __css_tryget(css); + return percpu_ref_tryget(&css->refcnt); } -extern void __css_put(struct cgroup_subsys_state *css); - /** * css_put - put a css reference * @css: target css @@ -137,7 +129,7 @@ extern void __css_put(struct cgroup_subsys_state *css); static inline void css_put(struct cgroup_subsys_state *css) { if (!(css->flags & CSS_ROOT)) - __css_put(css); + percpu_ref_put(&css->refcnt); } /* bits in struct cgroup flags field */ @@ -231,9 +223,10 @@ struct cgroup { struct list_head pidlists; struct mutex pidlist_mutex; - /* For RCU-protected deletion */ + /* For css percpu_ref killing and RCU-protected deletion */ struct rcu_head rcu_head; struct work_struct destroy_work; + atomic_t css_kill_cnt; /* List of events which userspace want to receive */ struct list_head event_list; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ebbfc04..2e9da7b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -63,9 +63,6 @@ #include -/* css deactivation bias, makes css->refcnt negative to deny new trygets */ -#define CSS_DEACT_BIAS INT_MIN - /* * cgroup_mutex is the master lock. Any modification to cgroup or its * hierarchy must be performed while holding it. 
@@ -213,19 +210,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys, struct cftype cfts[], bool is_add); -static int css_unbias_refcnt(int refcnt) -{ - return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS; -} - -/* the current nr of refs, always >= 0 whether @css is deactivated or not */ -static int css_refcnt(struct cgroup_subsys_state *css) -{ - int v = atomic_read(&css->refcnt); - - return css_unbias_refcnt(v); -} - /* convenient tests for these bits */ static inline bool cgroup_is_dead(const struct cgroup *cgrp) { @@ -4139,12 +4123,19 @@ static void css_dput_fn(struct work_struct *work) deactivate_super(sb); } +static void css_release(struct percpu_ref *ref) +{ + struct cgroup_subsys_state *css = + container_of(ref, struct cgroup_subsys_state, refcnt); + + schedule_work(&css->dput_work); +} + static void init_cgroup_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss, struct cgroup *cgrp) { css->cgroup = cgrp; - atomic_set(&css->refcnt, 1); css->flags = 0; css->id = NULL; if (cgrp == dummytop) @@ -4266,7 +4257,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, err = PTR_ERR(css); goto err_free_all; } + + err = percpu_ref_init(&css->refcnt, css_release); + if (err) + goto err_free_all; + init_cgroup_css(css, ss, cgrp); + if (ss->use_id) { err = alloc_css_id(ss, parent, cgrp); if (err) @@ -4331,8 +4328,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, err_free_all: for_each_subsys(root, ss) { - if (cgrp->subsys[ss->subsys_id]) + struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; + + if (css) { + percpu_ref_cancel_init(&css->refcnt); ss->css_free(cgrp); + } } mutex_unlock(&cgroup_mutex); /* Release the reference count that we took on the superblock */ @@ -4360,6 +4361,48 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) return cgroup_create(c_parent, dentry, mode | S_IFDIR); } +static void cgroup_css_killed(struct cgroup *cgrp) +{ + if (!atomic_dec_and_test(&cgrp->css_kill_cnt)) + return; + + /* percpu ref's of all css's are killed, kick off the next step */ + INIT_WORK(&cgrp->destroy_work, cgroup_offline_fn); + schedule_work(&cgrp->destroy_work); +} + +static void css_ref_killed_fn(struct percpu_ref *ref) +{ + struct cgroup_subsys_state *css = + container_of(ref, struct cgroup_subsys_state, refcnt); + + cgroup_css_killed(css->cgroup); +} + +/** + * cgroup_destroy_locked - the first stage of cgroup destruction + * @cgrp: cgroup to be destroyed + * + * css's make use of percpu refcnts whose killing latency shouldn't be + * exposed to userland and are RCU protected. Also, cgroup core needs to + * guarantee that css_tryget() won't succeed by the time ->css_offline() is + * invoked. To satisfy all the requirements, destruction is implemented in + * the following two steps. + * + * s1. Verify @cgrp can be destroyed and mark it dying. Remove all + * userland visible parts and start killing the percpu refcnts of + * css's. Set up so that the next stage will be kicked off once all + * the percpu refcnts are confirmed to be killed. + * + * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the + * rest of destruction. Once all cgroup references are gone, the + * cgroup is RCU-freed. + * + * This function implements s1. After this step, @cgrp is gone as far as + * the userland is concerned and a new cgroup with the same name may be + * created. 
As cgroup doesn't care about the names internally, this + * doesn't cause any problem. + */ static int cgroup_destroy_locked(struct cgroup *cgrp) __releases(&cgroup_mutex) __acquires(&cgroup_mutex) { @@ -4382,16 +4425,34 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) return -EBUSY; /* - * Block new css_tryget() by deactivating refcnt and mark @cgrp - * removed. This makes future css_tryget() attempts fail which we - * guarantee to ->css_offline() callbacks. + * Block new css_tryget() by killing css refcnts. cgroup core + * guarantees that, by the time ->css_offline() is invoked, no new + * css reference will be given out via css_tryget(). We can't + * simply call percpu_ref_kill() and proceed to offlining css's + * because percpu_ref_kill() doesn't guarantee that the ref is seen + * as killed on all CPUs on return. + * + * Use percpu_ref_kill_and_confirm() to get notifications as each + * css is confirmed to be seen as killed on all CPUs. The + * notification callback keeps track of the number of css's to be + * killed and schedules cgroup_offline_fn() to perform the rest of + * destruction once the percpu refs of all css's are confirmed to + * be killed. */ + atomic_set(&cgrp->css_kill_cnt, 1); for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; - WARN_ON(atomic_read(&css->refcnt) < 0); - atomic_add(CSS_DEACT_BIAS, &css->refcnt); + /* + * Killing would put the base ref, but we need to keep it + * alive until after ->css_offline. + */ + percpu_ref_get(&css->refcnt); + + atomic_inc(&cgrp->css_kill_cnt); + percpu_ref_kill_and_confirm(&css->refcnt, css_ref_killed_fn); } + cgroup_css_killed(cgrp); /* * Mark @cgrp dead. This prevents further task migration and child @@ -4427,12 +4488,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) } spin_unlock(&cgrp->event_list_lock); - INIT_WORK(&cgrp->destroy_work, cgroup_offline_fn); - schedule_work(&cgrp->destroy_work); - return 0; }; +/** + * cgroup_offline_fn - the second step of cgroup destruction + * @work: cgroup->destroy_work + * + * This function is invoked from a work item for a cgroup which is being + * destroyed after the percpu refcnts of all css's are guaranteed to be + * seen as killed on all CPUs, and performs the rest of destruction. This + * is the second step of destruction described in the comment above + * cgroup_destroy_locked(). + */ static void cgroup_offline_fn(struct work_struct *work) { struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); @@ -4442,16 +4510,19 @@ static void cgroup_offline_fn(struct work_struct *work) mutex_lock(&cgroup_mutex); - /* tell subsystems to initate destruction */ + /* + * css_tryget() is guaranteed to fail now. Tell subsystems to + * initiate destruction. + */ for_each_subsys(cgrp->root, ss) offline_css(ss, cgrp); /* - * Put all the base refs. Each css holds an extra reference to the - * cgroup's dentry and cgroup removal proceeds regardless of css - * refs. On the last put of each css, whenever that may be, the - * extra dentry ref is put so that dentry destruction happens only - * after all css's are released. + * Put the css refs from cgroup_destroy_locked(). Each css holds + * an extra reference to the cgroup's dentry and cgroup removal + * proceeds regardless of css refs. On the last put of each css, + * whenever that may be, the extra dentry ref is put so that dentry + * destruction happens only after all css's are released.
*/ for_each_subsys(cgrp->root, ss) css_put(cgrp->subsys[ss->subsys_id]); @@ -5100,34 +5171,6 @@ static void check_for_release(struct cgroup *cgrp) } } -/* Caller must verify that the css is not for root cgroup */ -bool __css_tryget(struct cgroup_subsys_state *css) -{ - while (true) { - int t, v; - - v = css_refcnt(css); - t = atomic_cmpxchg(&css->refcnt, v, v + 1); - if (likely(t == v)) - return true; - else if (t < 0) - return false; - cpu_relax(); - } -} -EXPORT_SYMBOL_GPL(__css_tryget); - -/* Caller must verify that the css is not for root cgroup */ -void __css_put(struct cgroup_subsys_state *css) -{ - int v; - - v = css_unbias_refcnt(atomic_dec_return(&css->refcnt)); - if (v == 0) - schedule_work(&css->dput_work); -} -EXPORT_SYMBOL_GPL(__css_put); - /* * Notify userspace when a cgroup is released, by running the * configured release agent with the name of the cgroup (path @@ -5245,7 +5288,7 @@ unsigned short css_id(struct cgroup_subsys_state *css) * on this or this is under rcu_read_lock(). Once css->id is allocated, * it's unchanged until freed. */ - cssid = rcu_dereference_check(css->id, css_refcnt(css)); + cssid = rcu_dereference_raw(css->id); if (cssid) return cssid->id; -- cgit v0.10.2 From f63674fd0d6afa1ba24309aee1f8c60195d39041 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Jun 2013 19:58:38 -0700 Subject: cgroup: update sane_behavior documentation f12dc02014 ("cgroup: mark "tasks" cgroup file as insane") and cc5943a781 ("cgroup: mark "notify_on_release" and "release_agent" cgroup files insane") forgot to update the changed behavior documentation in cgroup.h. Update it. Signed-off-by: Tejun Heo diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b7bd4be..1760476 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -264,13 +264,14 @@ enum { * * - Remount is disallowed. * - * - memcg: use_hierarchy is on by default and the cgroup file for - * the flag is not created. + * - "tasks" is removed. Everything should be at process + * granularity. Use "cgroup.procs" instead. * - * The followings are planned changes. + * - "release_agent" and "notify_on_release" are removed. + * Replacement notification mechanism will be implemented. * - * - release_agent will be disallowed once replacement notification - * mechanism is implemented. + * - memcg: use_hierarchy is on by default and the cgroup file for + * the flag is not created. */ CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), -- cgit v0.10.2 From 6db8e85c5c1f89cd0183b76dab027c81009f129f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 14 Jun 2013 11:18:22 -0700 Subject: cgroup: disallow rename(2) if sane_behavior cgroup's rename(2) isn't a proper migration implementation - it can't move the cgroup to a different parent in the hierarchy. All it can do is swap the name string for that cgroup. This isn't useful and can mislead users into thinking that cgroup supports proper cgroup-level migration. Disallow rename(2) if sane_behavior. v2: Fail with -EPERM instead of -EINVAL so that it matches the vfs return value when ->rename is not implemented as suggested by Li. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1760476..f975227 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -270,6 +270,8 @@ enum { * - "release_agent" and "notify_on_release" are removed. * Replacement notification mechanism will be implemented. * + * - rename(2) is disallowed.
+ * + * - memcg: use_hierarchy is on by default and the cgroup file for + * the flag is not created. */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2e9da7b..c2c64005 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2508,6 +2508,13 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, cgrp = __d_cgrp(old_dentry); + /* + * This isn't a proper migration and its usefulness is very + * limited. Disallow if sane_behavior. + */ + if (cgroup_sane_behavior(cgrp)) + return -EPERM; + name = cgroup_alloc_name(new_dentry); if (!name) return -ENOMEM; -- cgit v0.10.2 From 084457f284abf6789d90509ee11dae383842b23b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 18 Jun 2013 18:40:19 +0800 Subject: cgroup: fix umount vs cgroup_cfts_commit() race cgroup_cfts_commit() uses dget() to keep cgroup alive after cgroup_mutex is dropped, but dget() won't prevent cgroupfs from being umounted. When the race happens, vfs will see some dentries with non-zero refcnt while umount is in process. Keep running this: mount -t cgroup -o blkio xxx /cgroup umount /cgroup And this: modprobe cfq-iosched rmmod cfq-iosched After a while, the BUG() in shrink_dcache_for_umount_subtree() may be triggered: BUG: Dentry xxx{i=0,n=blkio.yyy} still in use (1) [umount of cgroup cgroup] Signed-off-by: Li Zefan Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org diff --git a/kernel/cgroup.c b/kernel/cgroup.c index c2c64005..0224f6b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2798,13 +2798,17 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss, { LIST_HEAD(pending); struct cgroup *cgrp, *n; + struct super_block *sb = ss->root->sb; /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ - if (cfts && ss->root != &rootnode) { + if (cfts && ss->root != &rootnode && + atomic_inc_not_zero(&sb->s_active)) { list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) { dget(cgrp->dentry); list_add_tail(&cgrp->cft_q_node, &pending); } + } else { + sb = NULL; } mutex_unlock(&cgroup_mutex); @@ -2827,6 +2831,9 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss, dput(cgrp->dentry); } + if (sb) + deactivate_super(sb); + mutex_unlock(&cgroup_cft_mutex); } -- cgit v0.10.2 From 1c8158eeae0f37d0eee9f1fbe68080df6a408df2 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 18 Jun 2013 18:41:10 +0800 Subject: cgroup: fix umount vs cgroup_event_remove() race commit 5db9a4d99b0157a513944e9a44d29c9cec2e91dc Author: Tejun Heo Date: Sat Jul 7 16:08:18 2012 -0700 cgroup: fix cgroup hierarchy umount race This commit fixed a race caused by the dput() in css_dput_fn(), but the dput() in cgroup_event_remove() can also lead to the same BUG(). Signed-off-by: Li Zefan Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0224f6b..7db2940 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -3822,6 +3822,23 @@ static int cgroup_write_notify_on_release(struct cgroup *cgrp, } /* + * When dput() is called asynchronously, if umount has been done and + * then deactivate_super() in cgroup_free_fn() kills the superblock, + * there's a small window that vfs will see the root dentry with non-zero + * refcnt and trigger BUG(). + * + * That's why we hold a reference before dput() and drop it right after. + */ +static void cgroup_dput(struct cgroup *cgrp) +{ + struct super_block *sb = cgrp->root->sb; + + atomic_inc(&sb->s_active); + dput(cgrp->dentry); + deactivate_super(sb); +} + +/* * Unregister event and free resources. * * Gets called from workqueue.
@@ -3841,7 +3858,7 @@ static void cgroup_event_remove(struct work_struct *work) eventfd_ctx_put(event->eventfd); kfree(event); - dput(cgrp->dentry); + cgroup_dput(cgrp); } /* @@ -4129,12 +4146,8 @@ static void css_dput_fn(struct work_struct *work) { struct cgroup_subsys_state *css = container_of(work, struct cgroup_subsys_state, dput_work); - struct dentry *dentry = css->cgroup->dentry; - struct super_block *sb = dentry->d_sb; - atomic_inc(&sb->s_active); - dput(dentry); - deactivate_super(sb); + cgroup_dput(css->cgroup); } static void css_release(struct percpu_ref *ref) -- cgit v0.10.2 From f57947d27711451a7739a25bba6cddc8a385e438 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 18 Jun 2013 18:41:53 +0800 Subject: cgroup: fix memory leak in cgroup_rm_cftypes() The memory allocated in cgroup_add_cftypes() should be freed. The effect of this bug is we leak a bit of memory every time we unload the cfq-iosched module if blkio cgroup is enabled. Signed-off-by: Li Zefan Signed-off-by: Tejun Heo diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 7db2940..1d4f471 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2889,7 +2889,8 @@ int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) list_for_each_entry(set, &ss->cftsets, node) { if (set->cfts == cfts) { - list_del_init(&set->node); + list_del(&set->node); + kfree(set); cgroup_cfts_commit(ss, cfts, false); return 0; } -- cgit v0.10.2 From 794611a1dfcb055d7d41ce133378dd8197d73e38 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 18 Jun 2013 18:53:53 +0800 Subject: cgroup: make serial_nr_cursor available throughout cgroup.c The next patch will use it to determine if a cgroup is newly created while we're iterating the cgroup hierarchy. tj: Rephrased the comment on top of cgroup_serial_nr_cursor. Signed-off-by: Li Zefan Signed-off-by: Tejun Heo diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1d4f471..e6571ca 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -198,6 +198,15 @@ static DEFINE_IDR(cgroup_hierarchy_idr); static struct cgroup_name root_cgroup_name = { .name = "/" }; +/* + * Assign a monotonically increasing serial number to cgroups. It + * guarantees cgroups with bigger numbers are newer than those with smaller + * numbers. Also, as cgroups are always appended to the parent's + * ->children list, it guarantees that sibling cgroups are always sorted in + * the ascending serial number order on the list. + */ +static atomic64_t cgroup_serial_nr_cursor = ATOMIC64_INIT(0); + /* This flag indicates whether tasks in the fork and exit paths should * check for fork/exit handlers to call. This avoids us having to do * extra work in the fork/exit path if none of the subsystems need to @@ -4222,7 +4231,6 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp) static long cgroup_create(struct cgroup *parent, struct dentry *dentry, umode_t mode) { - static atomic64_t serial_nr_cursor = ATOMIC64_INIT(0); struct cgroup *cgrp; struct cgroup_name *name; struct cgroupfs_root *root = parent->root; @@ -4309,13 +4317,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, goto err_free_all; lockdep_assert_held(&dentry->d_inode->i_mutex); - /* - * Assign a monotonically increasing serial number. With the list - * appending below, it guarantees that sibling cgroups are always - * sorted in the ascending serial number order on the parent's - * ->children.
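To see why a bare monotonic cursor is enough for the next patch's "was this cgroup created before point X?" test, consider this minimal stand-alone sketch (plain C, illustrative names, locking elided):

    #include <stdint.h>

    static uint64_t serial_nr_next = 1;     /* would be lock-protected */

    struct node { uint64_t serial_nr; };

    static void node_created(struct node *n)
    {
            n->serial_nr = serial_nr_next++;
    }

    /* snapshot = serial_nr_next read at some instant; every node whose
     * serial is smaller already existed when the snapshot was taken */
    static int created_before(const struct node *n, uint64_t snapshot)
    {
            return n->serial_nr < snapshot;
    }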
- */ - cgrp->serial_nr = atomic64_inc_return(&serial_nr_cursor); + cgrp->serial_nr = atomic64_inc_return(&cgroup_serial_nr_cursor); /* allocation complete, commit to creation */ list_add_tail(&cgrp->allcg_node, &root->allcg_list); -- cgit v0.10.2 From e8c82d20a9f729cf4b9f73043f7fd4e0872bebfd Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 18 Jun 2013 18:48:37 +0800 Subject: cgroup: convert cgroup_cft_commit() to use cgroup_for_each_descendant_pre() We used root->allcg_list to iterate cgroup hierarchy because at that time cgroup_for_each_descendant_pre() hasn't been invented. tj: In cgroup_cfts_commit(), s/@serial_nr/@update_upto/, move the assignment right above releasing cgroup_mutex and explain what's going on there. Signed-off-by: Li Zefan Signed-off-by: Tejun Heo diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f975227..b283658 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -206,9 +206,6 @@ struct cgroup { */ struct list_head cset_links; - struct list_head allcg_node; /* cgroupfs_root->allcg_list */ - struct list_head cft_q_node; /* used during cftype add/rm */ - /* * Linked list running through all cgroups that can * potentially be reaped by the release agent. Protected by @@ -313,9 +310,6 @@ struct cgroupfs_root { /* A list running through the active hierarchies */ struct list_head root_list; - /* All cgroups on this root, cgroup_mutex protected */ - struct list_head allcg_list; - /* Hierarchy-specific flags */ unsigned long flags; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e6571ca..0ed7d8d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1399,7 +1399,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) INIT_LIST_HEAD(&cgrp->children); INIT_LIST_HEAD(&cgrp->files); INIT_LIST_HEAD(&cgrp->cset_links); - INIT_LIST_HEAD(&cgrp->allcg_node); INIT_LIST_HEAD(&cgrp->release_list); INIT_LIST_HEAD(&cgrp->pidlists); mutex_init(&cgrp->pidlist_mutex); @@ -1414,12 +1413,10 @@ static void init_cgroup_root(struct cgroupfs_root *root) INIT_LIST_HEAD(&root->subsys_list); INIT_LIST_HEAD(&root->root_list); - INIT_LIST_HEAD(&root->allcg_list); root->number_of_cgroups = 1; cgrp->root = root; cgrp->name = &root_cgroup_name; init_cgroup_housekeeping(cgrp); - list_add_tail(&cgrp->allcg_node, &root->allcg_list); } static int cgroup_init_root_id(struct cgroupfs_root *root) @@ -2785,65 +2782,78 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys, return ret; } -static DEFINE_MUTEX(cgroup_cft_mutex); - static void cgroup_cfts_prepare(void) - __acquires(&cgroup_cft_mutex) __acquires(&cgroup_mutex) + __acquires(&cgroup_mutex) { /* * Thanks to the entanglement with vfs inode locking, we can't walk * the existing cgroups under cgroup_mutex and create files. - * Instead, we increment reference on all cgroups and build list of - * them using @cgrp->cft_q_node. Grab cgroup_cft_mutex to ensure - * exclusive access to the field. + * Instead, we use cgroup_for_each_descendant_pre() and drop RCU + * read lock before calling cgroup_addrm_files(). 
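The heart of the new traversal, condensed from the cgroup_cfts_commit() rewrite shown below (kernel context assumed, so this is an explanatory fragment rather than a stand-alone program): pin the current position with a dentry reference, leave the RCU read section to do blocking work, and keep the previous pin alive until the next one is taken.

    rcu_read_lock();
    cgroup_for_each_descendant_pre(cgrp, root) {
            dget(cgrp->dentry);     /* pin the current position */
            rcu_read_unlock();      /* sleeping is now allowed */

            dput(prev);             /* the old pin is no longer needed */
            prev = cgrp->dentry;

            /* ... i_mutex + cgroup_mutex, cgroup_addrm_files() ... */

            rcu_read_lock();        /* re-enter for the next iteration */
    }
    rcu_read_unlock();
    dput(prev);                     /* drop the final pin */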
*/ - mutex_lock(&cgroup_cft_mutex); mutex_lock(&cgroup_mutex); } static void cgroup_cfts_commit(struct cgroup_subsys *ss, struct cftype *cfts, bool is_add) - __releases(&cgroup_mutex) __releases(&cgroup_cft_mutex) + __releases(&cgroup_mutex) { LIST_HEAD(pending); - struct cgroup *cgrp, *n; + struct cgroup *cgrp, *root = &ss->root->top_cgroup; struct super_block *sb = ss->root->sb; + struct dentry *prev = NULL; + struct inode *inode; + u64 update_upto; /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ - if (cfts && ss->root != &rootnode && - atomic_inc_not_zero(sb->s_active)) { - list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) { - dget(cgrp->dentry); - list_add_tail(&cgrp->cft_q_node, &pending); - } - } else { - sb = NULL; + if (!cfts || ss->root == &rootnode || + !atomic_inc_not_zero(&sb->s_active)) { + mutex_unlock(&cgroup_mutex); + return; } - mutex_unlock(&cgroup_mutex); - /* - * All new cgroups will see @cfts update on @ss->cftsets. Add/rm - * files for all cgroups which were created before. + * All cgroups which are created after we drop cgroup_mutex will + * have the updated set of files, so we only need to update the + * cgroups created before the current @cgroup_serial_nr_cursor. */ - list_for_each_entry_safe(cgrp, n, &pending, cft_q_node) { - struct inode *inode = cgrp->dentry->d_inode; + update_upto = atomic64_read(&cgroup_serial_nr_cursor); + + mutex_unlock(&cgroup_mutex); + + /* @root always needs to be updated */ + inode = root->dentry->d_inode; + mutex_lock(&inode->i_mutex); + mutex_lock(&cgroup_mutex); + cgroup_addrm_files(root, ss, cfts, is_add); + mutex_unlock(&cgroup_mutex); + mutex_unlock(&inode->i_mutex); + + /* add/rm files for all cgroups created before */ + rcu_read_lock(); + cgroup_for_each_descendant_pre(cgrp, root) { + if (cgroup_is_dead(cgrp)) + continue; + + inode = cgrp->dentry->d_inode; + dget(cgrp->dentry); + rcu_read_unlock(); + + dput(prev); + prev = cgrp->dentry; mutex_lock(&inode->i_mutex); mutex_lock(&cgroup_mutex); - if (!cgroup_is_dead(cgrp)) + if (cgrp->serial_nr <= update_upto && !cgroup_is_dead(cgrp)) cgroup_addrm_files(cgrp, ss, cfts, is_add); mutex_unlock(&cgroup_mutex); mutex_unlock(&inode->i_mutex); - list_del_init(&cgrp->cft_q_node); - dput(cgrp->dentry); + rcu_read_lock(); } - - if (sb) - deactivate_super(sb); - - mutex_unlock(&cgroup_cft_mutex); + rcu_read_unlock(); + dput(prev); + deactivate_super(sb); } /** @@ -4320,7 +4330,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, cgrp->serial_nr = atomic64_inc_return(&cgroup_serial_nr_cursor); /* allocation complete, commit to creation */ - list_add_tail(&cgrp->allcg_node, &root->allcg_list); list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); root->number_of_cgroups++; @@ -4559,7 +4568,6 @@ static void cgroup_offline_fn(struct work_struct *work) /* delete this cgroup from parent->children */ list_del_rcu(&cgrp->sibling); - list_del_init(&cgrp->allcg_node); dput(d); -- cgit v0.10.2 From 00356bd5f0f5e04183fb15805eb29e97c2fc20ac Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 18 Jun 2013 11:14:22 -0700 Subject: cgroup: clean up cgroup_serial_nr_cursor cgroup_serial_nr_cursor was created atomic64_t because I thought it was never gonna be used for anything other than assigning unique numbers to cgroups and didn't want to worry about synchronization; however, now we're using it as an event-stamp to distinguish cgroups created before and after a certain point which assumes that it's protected by cgroup_mutex.
Let's make it clear by making it a u64. Also, rename it to cgroup_serial_nr_next and make it point to the next nr to allocate so that where it's pointing to is clear and more conventional. Signed-off-by: Tejun Heo Cc: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0ed7d8d..65f333e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -203,9 +203,10 @@ static struct cgroup_name root_cgroup_name = { .name = "/" }; * guarantees cgroups with bigger numbers are newer than those with smaller * numbers. Also, as cgroups are always appended to the parent's * ->children list, it guarantees that sibling cgroups are always sorted in - * the ascending serial number order on the list. + * the ascending serial number order on the list. Protected by + * cgroup_mutex. */ -static atomic64_t cgroup_serial_nr_cursor = ATOMIC64_INIT(0); +static u64 cgroup_serial_nr_next = 1; /* This flag indicates whether tasks in the fork and exit paths should * check for fork/exit handlers to call. This avoids us having to do @@ -2803,7 +2804,7 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss, struct super_block *sb = ss->root->sb; struct dentry *prev = NULL; struct inode *inode; - u64 update_upto; + u64 update_before; /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ if (!cfts || ss->root == &rootnode || @@ -2815,9 +2816,9 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss, /* * All cgroups which are created after we drop cgroup_mutex will * have the updated set of files, so we only need to update the - * cgroups created before the current @cgroup_serial_nr_cursor. + * cgroups created before the current @cgroup_serial_nr_next. */ - update_upto = atomic64_read(&cgroup_serial_nr_cursor); + update_before = cgroup_serial_nr_next; mutex_unlock(&cgroup_mutex); @@ -2844,7 +2845,7 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss, mutex_lock(&inode->i_mutex); mutex_lock(&cgroup_mutex); - if (cgrp->serial_nr <= update_upto && !cgroup_is_dead(cgrp)) + if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) cgroup_addrm_files(cgrp, ss, cfts, is_add); mutex_unlock(&cgroup_mutex); mutex_unlock(&inode->i_mutex); @@ -4327,7 +4328,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, goto err_free_all; lockdep_assert_held(&dentry->d_inode->i_mutex); - cgrp->serial_nr = atomic64_inc_return(&cgroup_serial_nr_cursor); + cgrp->serial_nr = cgroup_serial_nr_next++; /* allocation complete, commit to creation */ list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); -- cgit v0.10.2 From 03c78cbebb323fc97295ff97dc5e009d56371d57 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 14 Jun 2013 11:17:19 +0800 Subject: cgroup: rename cont to cgrp "cont" is short for "container". The control group was named "process container" at first, but people then found that "container" already had a meaning in the Linux kernel. Clean up the leftover variable name @cont. Signed-off-by: Li Zefan Signed-off-by: Tejun Heo diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b283658..6c2ba52 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -433,13 +433,13 @@ struct cftype { * entry. The key/value pairs (and their ordering) should not * change between reboots. */ - int (*read_map)(struct cgroup *cont, struct cftype *cft, + int (*read_map)(struct cgroup *cgrp, struct cftype *cft, struct cgroup_map_cb *cb); /* * read_seq_string() is used for outputting a simple sequence * using seqfile.
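As an aside, the read_seq_string() callback whose parameter is being renamed above is what a controller implements to back a read-only control file. A hypothetical handler matching the signature shown (the names example_show/example_files and the output format are made up, kernel context assumed):

    static int example_show(struct cgroup *cgrp, struct cftype *cft,
                            struct seq_file *m)
    {
            seq_printf(m, "hello from %s\n", cft->name);
            return 0;
    }

    static struct cftype example_files[] = {
            {
                    .name = "example",
                    .read_seq_string = example_show,
            },
            { }     /* terminator */
    };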
*/ - int (*read_seq_string)(struct cgroup *cont, struct cftype *cft, + int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft, struct seq_file *m); ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft, diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 65f333e..1051c1f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -5515,7 +5515,7 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id) } #ifdef CONFIG_CGROUP_DEBUG -static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cont) +static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp) { struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); @@ -5525,23 +5525,23 @@ static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cont) return css; } -static void debug_css_free(struct cgroup *cont) +static void debug_css_free(struct cgroup *cgrp) { - kfree(cont->subsys[debug_subsys_id]); + kfree(cgrp->subsys[debug_subsys_id]); } -static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft) +static u64 debug_taskcount_read(struct cgroup *cgrp, struct cftype *cft) { - return cgroup_task_count(cont); + return cgroup_task_count(cgrp); } -static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft) +static u64 current_css_set_read(struct cgroup *cgrp, struct cftype *cft) { return (u64)(unsigned long)current->cgroups; } -static u64 current_css_set_refcount_read(struct cgroup *cont, - struct cftype *cft) +static u64 current_css_set_refcount_read(struct cgroup *cgrp, + struct cftype *cft) { u64 count; @@ -5551,7 +5551,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cont, return count; } -static int current_css_set_cg_links_read(struct cgroup *cont, +static int current_css_set_cg_links_read(struct cgroup *cgrp, struct cftype *cft, struct seq_file *seq) { @@ -5578,14 +5578,14 @@ static int current_css_set_cg_links_read(struct cgroup *cont, } #define MAX_TASKS_SHOWN_PER_CSS 25 -static int cgroup_css_links_read(struct cgroup *cont, +static int cgroup_css_links_read(struct cgroup *cgrp, struct cftype *cft, struct seq_file *seq) { struct cgrp_cset_link *link; read_lock(&css_set_lock); - list_for_each_entry(link, &cont->cset_links, cset_link) { + list_for_each_entry(link, &cgrp->cset_links, cset_link) { struct css_set *cset = link->cset; struct task_struct *task; int count = 0; -- cgit v0.10.2 From 02c402d98588bdfd3bebd267db574e13afdef722 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 24 Jun 2013 15:21:47 -0700 Subject: cgroup: convert CFTYPE_* flags to enums Purely cosmetic. 
Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 6c2ba52..ab27001 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -385,9 +385,11 @@ struct cgroup_map_cb { */ /* cftype->flags */ -#define CFTYPE_ONLY_ON_ROOT (1U << 0) /* only create on root cg */ -#define CFTYPE_NOT_ON_ROOT (1U << 1) /* don't create on root cg */ -#define CFTYPE_INSANE (1U << 2) /* don't create if sane_behavior */ +enum { + CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cg */ + CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cg */ + CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */ +}; #define MAX_CFTYPE_NAME 64 -- cgit v0.10.2 From 9871bf9550d25e488cd2f0ce958d3f59f17fa720 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 24 Jun 2013 15:21:47 -0700 Subject: cgroup: prefix global variables with "cgroup_" Global variable names in kernel/cgroup.c are asking for trouble - subsys, roots, rootnode and so on. Rename them to have "cgroup_" prefix. * s/subsys/cgroup_subsys/ * s/rootnode/cgroup_dummy_root/ * s/dummytop/cgroup_dummy_top/ * s/roots/cgroup_roots/ * s/root_count/cgroup_root_count/ This patch is purely cosmetic. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1051c1f..8f296b8 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -96,16 +96,19 @@ static DEFINE_MUTEX(cgroup_root_mutex); */ #define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, #define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) -static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = { +static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = { #include <linux/cgroup_subsys.h> }; /* - * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the - * subsystems that are otherwise unattached - it never has more than a - * single cgroup, and all tasks are part of that cgroup. + * The dummy hierarchy, reserved for the subsystems that are otherwise + * unattached - it never has more than a single cgroup, and all tasks are + * part of that cgroup. */ -static struct cgroupfs_root rootnode; +static struct cgroupfs_root cgroup_dummy_root; + +/* dummy_top is a shorthand for the dummy hierarchy's top cgroup */ +static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup; /* * cgroupfs file entry, pointed to from leaf dentry->d_fsdata. @@ -183,8 +186,8 @@ struct cgroup_event { /* The list of hierarchy roots */ -static LIST_HEAD(roots); -static int root_count; +static LIST_HEAD(cgroup_roots); +static int cgroup_root_count; /* * Hierarchy ID allocation and mapping.
It follows the same exclusion @@ -193,9 +196,6 @@ static int root_count; */ static DEFINE_IDR(cgroup_hierarchy_idr); -/* dummytop is a shorthand for the dummy hierarchy's top cgroup */ -#define dummytop (&rootnode.top_cgroup) - static struct cgroup_name root_cgroup_name = { .name = "/" }; /* @@ -268,7 +268,7 @@ list_for_each_entry(_ss, &_root->subsys_list, sibling) /* for_each_active_root() allows you to iterate across the active hierarchies */ #define for_each_active_root(_root) \ -list_for_each_entry(_root, &roots, root_list) +list_for_each_entry(_root, &cgroup_roots, root_list) static inline struct cgroup *__d_cgrp(struct dentry *dentry) { @@ -650,7 +650,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, return NULL; /* Allocate all the cgrp_cset_link objects that we'll need */ - if (allocate_cgrp_cset_links(root_count, &tmp_links) < 0) { + if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) { kfree(cset); return NULL; } @@ -1000,7 +1000,7 @@ static int rebind_subsystems(struct cgroupfs_root *root, /* Check that any added subsystems are currently free */ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { unsigned long bit = 1UL << i; - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; if (!(bit & added_mask)) continue; /* @@ -1009,7 +1009,7 @@ static int rebind_subsystems(struct cgroupfs_root *root, * ensure that subsystems won't disappear once selected. */ BUG_ON(ss == NULL); - if (ss->root != &rootnode) { + if (ss->root != &cgroup_dummy_root) { /* Subsystem isn't free */ return -EBUSY; } @@ -1024,15 +1024,15 @@ static int rebind_subsystems(struct cgroupfs_root *root, /* Process each subsystem */ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; unsigned long bit = 1UL << i; if (bit & added_mask) { /* We're binding this subsystem to this hierarchy */ BUG_ON(ss == NULL); BUG_ON(cgrp->subsys[i]); - BUG_ON(!dummytop->subsys[i]); - BUG_ON(dummytop->subsys[i]->cgroup != dummytop); - cgrp->subsys[i] = dummytop->subsys[i]; + BUG_ON(!cgroup_dummy_top->subsys[i]); + BUG_ON(cgroup_dummy_top->subsys[i]->cgroup != cgroup_dummy_top); + cgrp->subsys[i] = cgroup_dummy_top->subsys[i]; cgrp->subsys[i]->cgroup = cgrp; list_move(&ss->sibling, &root->subsys_list); ss->root = root; @@ -1042,14 +1042,14 @@ static int rebind_subsystems(struct cgroupfs_root *root, } else if (bit & removed_mask) { /* We're removing this subsystem */ BUG_ON(ss == NULL); - BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]); + BUG_ON(cgrp->subsys[i] != cgroup_dummy_top->subsys[i]); BUG_ON(cgrp->subsys[i]->cgroup != cgrp); if (ss->bind) - ss->bind(dummytop); - dummytop->subsys[i]->cgroup = dummytop; + ss->bind(cgroup_dummy_top); + cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top; cgrp->subsys[i] = NULL; - subsys[i]->root = &rootnode; - list_move(&ss->sibling, &rootnode.subsys_list); + cgroup_subsys[i]->root = &cgroup_dummy_root; + list_move(&ss->sibling, &cgroup_dummy_root.subsys_list); /* subsystem is now free - drop reference on module */ module_put(ss->module); } else if (bit & final_subsys_mask) { @@ -1112,10 +1112,10 @@ struct cgroup_sb_opts { }; /* - * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call - * with cgroup_mutex held to protect the subsys[] array. This function takes - * refcounts on subsystems to be used, unless it returns error, in which case - * no refcounts are taken. + * Convert a hierarchy specifier into a bitmask of subsystems and + * flags. 
Call with cgroup_mutex held to protect the cgroup_subsys[] + * array. This function takes refcounts on subsystems to be used, unless it + * returns error, in which case no refcounts are taken. */ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) { @@ -1201,7 +1201,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) } for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; if (ss == NULL) continue; if (strcmp(token, ss->name)) @@ -1228,7 +1228,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) */ if (all_ss || (!one_ss && !opts->none && !opts->name)) { for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; if (ss == NULL) continue; if (ss->disabled) @@ -1284,7 +1284,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) if (!(bit & opts->subsys_mask)) continue; - if (!try_module_get(subsys[i]->module)) { + if (!try_module_get(cgroup_subsys[i]->module)) { module_pin_failed = true; break; } @@ -1301,7 +1301,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) if (!(bit & opts->subsys_mask)) continue; - module_put(subsys[i]->module); + module_put(cgroup_subsys[i]->module); } return -ENOENT; } @@ -1317,7 +1317,7 @@ static void drop_parsed_module_refcounts(unsigned long subsys_mask) if (!(bit & subsys_mask)) continue; - module_put(subsys[i]->module); + module_put(cgroup_subsys[i]->module); } } @@ -1648,8 +1648,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, /* EBUSY should be the only error here */ BUG_ON(ret); - list_add(&root->root_list, &roots); - root_count++; + list_add(&root->root_list, &cgroup_roots); + cgroup_root_count++; sb->s_root->d_fsdata = root_cgrp; root->top_cgroup.dentry = sb->s_root; @@ -1746,7 +1746,7 @@ static void cgroup_kill_sb(struct super_block *sb) { if (!list_empty(&root->root_list)) { list_del(&root->root_list); - root_count--; + cgroup_root_count--; } cgroup_exit_root_id(root); @@ -2807,7 +2807,7 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss, u64 update_before; /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ - if (!cfts || ss->root == &rootnode || + if (!cfts || ss->root == &cgroup_dummy_root || !atomic_inc_not_zero(&sb->s_active)) { mutex_unlock(&cgroup_mutex); return; @@ -4186,7 +4186,7 @@ static void init_cgroup_css(struct cgroup_subsys_state *css, css->cgroup = cgrp; css->flags = 0; css->id = NULL; - if (cgrp == dummytop) + if (cgrp == cgroup_dummy_top) css->flags |= CSS_ROOT; BUG_ON(cgrp->subsys[ss->subsys_id]); cgrp->subsys[ss->subsys_id] = css; @@ -4615,12 +4615,12 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) cgroup_init_cftsets(ss); /* Create the top cgroup state for this subsystem */ - list_add(&ss->sibling, &rootnode.subsys_list); - ss->root = &rootnode; - css = ss->css_alloc(dummytop); + list_add(&ss->sibling, &cgroup_dummy_root.subsys_list); + ss->root = &cgroup_dummy_root; + css = ss->css_alloc(cgroup_dummy_top); /* We don't handle early failures gracefully */ BUG_ON(IS_ERR(css)); - init_cgroup_css(css, ss, dummytop); + init_cgroup_css(css, ss, cgroup_dummy_top); /* Update the init_css_set to contain a subsys * pointer to this state - since the subsystem is @@ -4635,7 +4635,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) * need to invoke fork callbacks here. 
*/ BUG_ON(!list_empty(&init_task.tasks)); - BUG_ON(online_css(ss, dummytop)); + BUG_ON(online_css(ss, cgroup_dummy_top)); mutex_unlock(&cgroup_mutex); @@ -4681,7 +4681,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) */ if (ss->module == NULL) { /* a sanity check */ - BUG_ON(subsys[ss->subsys_id] != ss); + BUG_ON(cgroup_subsys[ss->subsys_id] != ss); return 0; } @@ -4689,26 +4689,26 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) cgroup_init_cftsets(ss); mutex_lock(&cgroup_mutex); - subsys[ss->subsys_id] = ss; + cgroup_subsys[ss->subsys_id] = ss; /* * no ss->css_alloc seems to need anything important in the ss - * struct, so this can happen first (i.e. before the rootnode + * struct, so this can happen first (i.e. before the dummy root * attachment). */ - css = ss->css_alloc(dummytop); + css = ss->css_alloc(cgroup_dummy_top); if (IS_ERR(css)) { - /* failure case - need to deassign the subsys[] slot. */ - subsys[ss->subsys_id] = NULL; + /* failure case - need to deassign the cgroup_subsys[] slot. */ + cgroup_subsys[ss->subsys_id] = NULL; mutex_unlock(&cgroup_mutex); return PTR_ERR(css); } - list_add(&ss->sibling, &rootnode.subsys_list); - ss->root = &rootnode; + list_add(&ss->sibling, &cgroup_dummy_root.subsys_list); + ss->root = &cgroup_dummy_root; /* our new subsystem will be attached to the dummy hierarchy. */ - init_cgroup_css(css, ss, dummytop); + init_cgroup_css(css, ss, cgroup_dummy_top); /* init_idr must be after init_cgroup_css because it sets css->id. */ if (ss->use_id) { ret = cgroup_init_idr(ss, css); @@ -4739,7 +4739,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) } write_unlock(&css_set_lock); - ret = online_css(ss, dummytop); + ret = online_css(ss, cgroup_dummy_top); if (ret) goto err_unload; @@ -4774,27 +4774,28 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) * try_module_get in parse_cgroupfs_options should ensure that it * doesn't start being used while we're killing it off. */ - BUG_ON(ss->root != &rootnode); + BUG_ON(ss->root != &cgroup_dummy_root); mutex_lock(&cgroup_mutex); - offline_css(ss, dummytop); + offline_css(ss, cgroup_dummy_top); if (ss->use_id) idr_destroy(&ss->idr); /* deassign the subsys_id */ - subsys[ss->subsys_id] = NULL; + cgroup_subsys[ss->subsys_id] = NULL; - /* remove subsystem from rootnode's list of subsystems */ + /* remove subsystem from the dummy root's list of subsystems */ list_del_init(&ss->sibling); /* - * disentangle the css from all css_sets attached to the dummytop. as - * in loading, we need to pay our respects to the hashtable gods. + * disentangle the css from all css_sets attached to the dummy + * top. as in loading, we need to pay our respects to the hashtable + * gods. */ write_lock(&css_set_lock); - list_for_each_entry(link, &dummytop->cset_links, cset_link) { + list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) { struct css_set *cset = link->cset; unsigned long key; @@ -4806,13 +4807,13 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) write_unlock(&css_set_lock); /* - * remove subsystem's css from the dummytop and free it - need to - * free before marking as null because ss->css_free needs the - * cgrp->subsys pointer to find their state. note that this also - * takes care of freeing the css_id. + * remove subsystem's css from the cgroup_dummy_top and free it - + * need to free before marking as null because ss->css_free needs + * the cgrp->subsys pointer to find their state. note that this + * also takes care of freeing the css_id. 
*/ - ss->css_free(dummytop); - dummytop->subsys[ss->subsys_id] = NULL; + ss->css_free(cgroup_dummy_top); + cgroup_dummy_top->subsys[ss->subsys_id] = NULL; mutex_unlock(&cgroup_mutex); } @@ -4832,17 +4833,17 @@ int __init cgroup_init_early(void) INIT_LIST_HEAD(&init_css_set.tasks); INIT_HLIST_NODE(&init_css_set.hlist); css_set_count = 1; - init_cgroup_root(&rootnode); - root_count = 1; + init_cgroup_root(&cgroup_dummy_root); + cgroup_root_count = 1; init_task.cgroups = &init_css_set; init_cgrp_cset_link.cset = &init_css_set; - init_cgrp_cset_link.cgrp = dummytop; - list_add(&init_cgrp_cset_link.cset_link, &rootnode.top_cgroup.cset_links); + init_cgrp_cset_link.cgrp = cgroup_dummy_top; + list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; /* at bootup time, we don't worry about modular subsystems */ if (!ss || ss->module) @@ -4881,7 +4882,7 @@ int __init cgroup_init(void) return err; for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; /* at bootup time, we don't worry about modular subsystems */ if (!ss || ss->module) @@ -4900,7 +4901,7 @@ int __init cgroup_init(void) mutex_lock(&cgroup_mutex); mutex_lock(&cgroup_root_mutex); - BUG_ON(cgroup_init_root_id(&rootnode)); + BUG_ON(cgroup_init_root_id(&cgroup_dummy_root)); mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); @@ -5004,7 +5005,7 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v) */ mutex_lock(&cgroup_mutex); for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; if (ss == NULL) continue; seq_printf(m, "%s\t%d\t%d\t%d\n", @@ -5101,7 +5102,7 @@ void cgroup_post_fork(struct task_struct *child) * can't touch that. */ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; if (ss->fork) ss->fork(child); @@ -5172,7 +5173,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) * subsystems, see cgroup_post_fork() for details. */ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; if (ss->exit) { struct cgroup *old_cgrp = @@ -5291,7 +5292,7 @@ static int __init cgroup_disable(char *str) if (!*token) continue; for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys *ss = cgroup_subsys[i]; /* * cgroup_disable, being at boot time, can't -- cgit v0.10.2 From a8a648c4acee2095262f7fa65b0d8a68a03c32e4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 24 Jun 2013 15:21:47 -0700 Subject: cgroup: remove cgroup->actual_subsys_mask cgroup curiously has two subsystem masks, ->subsys_mask and ->actual_subsys_mask. The latter only exists because the new target subsys_mask is passed into rebind_subsystems() via @root->subsys_mask. rebind_subsystems() needs to know what the current mask is to decide how to reach the target mask so ->actual_subsys_mask is used as the temp location to remember the current state. Adding a temporary field to a permanent data structure is rather silly and can be misleading. Update rebind_subsystems() to take @added_mask and @removed_mask instead and remove @root->actual_subsys_mask.
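The mask arithmetic this change relocates is compact enough to illustrate in isolation: given a current and a target bitmask, the set to bind and the set to unbind fall out of two AND-NOT operations, as the removed lines in the diff below show. A stand-alone sketch with made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long current_mask = 0x5;   /* subsystems 0 and 2 bound */
            unsigned long target_mask  = 0x6;   /* want subsystems 1 and 2 */

            unsigned long added_mask   = target_mask & ~current_mask;
            unsigned long removed_mask = current_mask & ~target_mask;

            printf("add 0x%lx, remove 0x%lx\n", added_mask, removed_mask);
            /* prints: add 0x2, remove 0x1 */
            return 0;
    }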
This patch shouldn't introduce any behavior changes. v2: Comment and description updated as suggested by Li. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ab27001..4c1eceb 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -286,18 +286,12 @@ enum { struct cgroupfs_root { struct super_block *sb; - /* - * The bitmask of subsystems intended to be attached to this - * hierarchy - */ + /* The bitmask of subsystems attached to this hierarchy */ unsigned long subsys_mask; /* Unique id for this hierarchy. */ int hierarchy_id; - /* The bitmask of subsystems currently attached to this hierarchy */ - unsigned long actual_subsys_mask; - /* A list running through the attached subsystems */ struct list_head subsys_list; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 8f296b8..67fc953 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -986,17 +986,14 @@ static void cgroup_d_remove_dir(struct dentry *dentry) * returns an error, no reference counts are touched. */ static int rebind_subsystems(struct cgroupfs_root *root, - unsigned long final_subsys_mask) + unsigned long added_mask, unsigned removed_mask) { - unsigned long added_mask, removed_mask; struct cgroup *cgrp = &root->top_cgroup; int i; BUG_ON(!mutex_is_locked(&cgroup_mutex)); BUG_ON(!mutex_is_locked(&cgroup_root_mutex)); - removed_mask = root->actual_subsys_mask & ~final_subsys_mask; - added_mask = final_subsys_mask & ~root->actual_subsys_mask; /* Check that any added subsystems are currently free */ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { unsigned long bit = 1UL << i; @@ -1032,27 +1029,33 @@ static int rebind_subsystems(struct cgroupfs_root *root, BUG_ON(cgrp->subsys[i]); BUG_ON(!cgroup_dummy_top->subsys[i]); BUG_ON(cgroup_dummy_top->subsys[i]->cgroup != cgroup_dummy_top); + cgrp->subsys[i] = cgroup_dummy_top->subsys[i]; cgrp->subsys[i]->cgroup = cgrp; list_move(&ss->sibling, &root->subsys_list); ss->root = root; if (ss->bind) ss->bind(cgrp); + /* refcount was already taken, and we're keeping it */ + root->subsys_mask |= bit; } else if (bit & removed_mask) { /* We're removing this subsystem */ BUG_ON(ss == NULL); BUG_ON(cgrp->subsys[i] != cgroup_dummy_top->subsys[i]); BUG_ON(cgrp->subsys[i]->cgroup != cgrp); + if (ss->bind) ss->bind(cgroup_dummy_top); cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top; cgrp->subsys[i] = NULL; cgroup_subsys[i]->root = &cgroup_dummy_root; list_move(&ss->sibling, &cgroup_dummy_root.subsys_list); + /* subsystem is now free - drop reference on module */ module_put(ss->module); - } else if (bit & final_subsys_mask) { + root->subsys_mask &= ~bit; + } else if (bit & root->subsys_mask) { /* Subsystem state should already exist */ BUG_ON(ss == NULL); BUG_ON(!cgrp->subsys[i]); @@ -1069,7 +1072,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, BUG_ON(cgrp->subsys[i]); } } - root->subsys_mask = root->actual_subsys_mask = final_subsys_mask; return 0; } @@ -1343,7 +1345,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) if (ret) goto out_unlock; - if (opts.subsys_mask != root->actual_subsys_mask || opts.release_agent) + if (opts.subsys_mask != root->subsys_mask || opts.release_agent) pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", task_tgid_nr(current), current->comm); @@ -1365,7 +1367,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) */ cgroup_clear_directory(cgrp->dentry, false, removed_mask); - ret = rebind_subsystems(root, 
opts.subsys_mask); + ret = rebind_subsystems(root, added_mask, removed_mask); if (ret) { /* rebind_subsystems failed, re-populate the removed files */ cgroup_populate_dir(cgrp, false, removed_mask); @@ -1634,7 +1636,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (ret) goto unlock_drop; - ret = rebind_subsystems(root, root->subsys_mask); + ret = rebind_subsystems(root, root->subsys_mask, 0); if (ret == -EBUSY) { free_cgrp_cset_links(&tmp_links); goto unlock_drop; @@ -1727,7 +1729,7 @@ static void cgroup_kill_sb(struct super_block *sb) { mutex_lock(&cgroup_root_mutex); /* Rebind all subsystems back to the default hierarchy */ - ret = rebind_subsystems(root, 0); + ret = rebind_subsystems(root, 0, root->subsys_mask); /* Shouldn't be able to fail ... */ BUG_ON(ret); -- cgit v0.10.2 From b326f9d0dbd066b0aafbe88e6011a680a36de6e8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 24 Jun 2013 15:21:48 -0700 Subject: cgroup: clean up find_css_set() and friends find_css_set() passes uninitialized on-stack template[] array to find_existing_css_set() which sets the entries for all subsystems. Passing around an uninitialized array is a bit icky and we want to introduce an iterator which only iterates loaded subsystems. Let's initialize it on definition. While at it, also make the following cosmetic cleanups. * Convert to proper /** comments. * Reorder variable declarations. * Replace comment on synchronization with lockdep_assert_held(). This patch doesn't make any functional differences. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 67fc953..c8d3175 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -434,7 +434,7 @@ static inline void put_css_set_taskexit(struct css_set *cset) __put_css_set(cset, 1); } -/* +/** * compare_css_sets - helper function for find_existing_css_set(). * @cset: candidate css_set being tested * @old_cset: existing css_set for a task @@ -506,27 +506,20 @@ static bool compare_css_sets(struct css_set *cset, return true; } -/* - * find_existing_css_set() is a helper for - * find_css_set(), and checks to see whether an existing - * css_set is suitable. - * - * oldcg: the cgroup group that we're using before the cgroup - * transition - * - * cgrp: the cgroup that we're moving into - * - * template: location in which to build the desired set of subsystem - * state objects for the new cgroup group +/** + * find_existing_css_set - init css array and find the matching css_set + * @old_cset: the css_set that we're using before the cgroup transition + * @cgrp: the cgroup that we're moving into + * @template: out param for the new set of csses, should be clear on entry */ static struct css_set *find_existing_css_set(struct css_set *old_cset, struct cgroup *cgrp, struct cgroup_subsys_state *template[]) { - int i; struct cgroupfs_root *root = cgrp->root; struct css_set *cset; unsigned long key; + int i; /* * Build the set of subsystem state objects that we want to see in the @@ -618,22 +611,25 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset, list_add_tail(&link->cgrp_link, &cset->cgrp_links); } -/* - * find_css_set() takes an existing cgroup group and a - * cgroup object, and returns a css_set object that's - * equivalent to the old group, but with the given cgroup - * substituted into the appropriate hierarchy. 
Must be called with - * cgroup_mutex held +/** + * find_css_set - return a new css_set with one cgroup updated + * @old_cset: the baseline css_set + * @cgrp: the cgroup to be updated + * + * Return a new css_set that's equivalent to @old_cset, but with @cgrp + * substituted into the appropriate hierarchy. */ static struct css_set *find_css_set(struct css_set *old_cset, struct cgroup *cgrp) { + struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { }; struct css_set *cset; - struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; struct list_head tmp_links; struct cgrp_cset_link *link; unsigned long key; + lockdep_assert_held(&cgroup_mutex); + /* First see if we already have a cgroup group that matches * the desired set */ read_lock(&css_set_lock); -- cgit v0.10.2 From 5549c497913ad860d3dff4386c6423268bb85693 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 24 Jun 2013 15:21:48 -0700 Subject: cgroup: s/for_each_subsys()/for_each_root_subsys()/ for_each_subsys() walks over subsystems attached to a hierarchy and we're gonna add iterators which walk over all available subsystems. Rename for_each_subsys() to for_each_root_subsys() so that it's more appropriately named and for_each_subsys() can be used to iterate all subsystems. While at it, remove unnecessary underbar prefix from macro arguments, put them inside parentheses, and adjust indentation for the two for_each_*() macros. This patch is purely cosmetic. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index c8d3175..605cb13 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -259,16 +259,13 @@ static int notify_on_release(const struct cgroup *cgrp) return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); } -/* - * for_each_subsys() allows you to iterate on each subsystem attached to - * an active hierarchy - */ -#define for_each_subsys(_root, _ss) \ -list_for_each_entry(_ss, &_root->subsys_list, sibling) +/* iterate each subsystem attached to a hierarchy */ +#define for_each_root_subsys(root, ss) \ + list_for_each_entry((ss), &(root)->subsys_list, sibling) -/* for_each_active_root() allows you to iterate across the active hierarchies */ -#define for_each_active_root(_root) \ -list_for_each_entry(_root, &cgroup_roots, root_list) +/* iterate across the active hierarchies */ +#define for_each_active_root(root) \ + list_for_each_entry((root), &cgroup_roots, root_list) static inline struct cgroup *__d_cgrp(struct dentry *dentry) { @@ -828,7 +825,7 @@ static void cgroup_free_fn(struct work_struct *work) /* * Release the subsystem state objects. */ - for_each_subsys(cgrp->root, ss) + for_each_root_subsys(cgrp->root, ss) ss->css_free(cgrp); cgrp->root->number_of_cgroups--; @@ -944,7 +941,7 @@ static void cgroup_clear_directory(struct dentry *dir, bool base_files, struct cgroup *cgrp = __d_cgrp(dir); struct cgroup_subsys *ss; - for_each_subsys(cgrp->root, ss) { + for_each_root_subsys(cgrp->root, ss) { struct cftype_set *set; if (!test_bit(ss->subsys_id, &subsys_mask)) continue; @@ -1078,7 +1075,7 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) struct cgroup_subsys *ss; mutex_lock(&cgroup_root_mutex); - for_each_subsys(root, ss) + for_each_root_subsys(root, ss) seq_printf(seq, ",%s", ss->name); if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) seq_puts(seq, ",sane_behavior"); @@ -2054,7 +2051,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, /* * step 1: check that we can legitimately attach to the cgroup. 
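The `= { }` initializer added to template[] above is doing real work: an on-stack array is indeterminate until written. A minimal stand-alone demonstration (the empty-brace form is a GCC extension the kernel relies on; standard C spells it `{ 0 }`):

    #include <stdio.h>

    int main(void)
    {
            int uninit[4];          /* values are indeterminate */
            int zeroed[4] = { };    /* all zero, like template[] above */

            (void)uninit;           /* never read: doing so would be UB */
            for (int i = 0; i < 4; i++)
                    printf("%d ", zeroed[i]);   /* prints: 0 0 0 0 */
            printf("\n");
            return 0;
    }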
*/ - for_each_subsys(root, ss) { + for_each_root_subsys(root, ss) { if (ss->can_attach) { retval = ss->can_attach(cgrp, &tset); if (retval) { @@ -2091,7 +2088,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, /* * step 4: do subsystem attach callbacks. */ - for_each_subsys(root, ss) { + for_each_root_subsys(root, ss) { if (ss->attach) ss->attach(cgrp, &tset); } @@ -2111,7 +2108,7 @@ out_put_css_set_refs: } out_cancel_attach: if (retval) { - for_each_subsys(root, ss) { + for_each_root_subsys(root, ss) { if (ss == failed_ss) break; if (ss->cancel_attach) @@ -4137,7 +4134,7 @@ static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files, } /* process cftsets of each subsystem */ - for_each_subsys(cgrp->root, ss) { + for_each_root_subsys(cgrp->root, ss) { struct cftype_set *set; if (!test_bit(ss->subsys_id, &subsys_mask)) continue; @@ -4147,7 +4144,7 @@ static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files, } /* This cgroup is ready now */ - for_each_subsys(cgrp->root, ss) { + for_each_root_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; /* * Update id->css pointer and make this css visible from @@ -4294,7 +4291,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); - for_each_subsys(root, ss) { + for_each_root_subsys(root, ss) { struct cgroup_subsys_state *css; css = ss->css_alloc(cgrp); @@ -4333,14 +4330,14 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, root->number_of_cgroups++; /* each css holds a ref to the cgroup's dentry */ - for_each_subsys(root, ss) + for_each_root_subsys(root, ss) dget(dentry); /* hold a ref to the parent's dentry */ dget(parent->dentry); /* creation succeeded, notify subsystems */ - for_each_subsys(root, ss) { + for_each_root_subsys(root, ss) { err = online_css(ss, cgrp); if (err) goto err_destroy; @@ -4365,7 +4362,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, return 0; err_free_all: - for_each_subsys(root, ss) { + for_each_root_subsys(root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; if (css) { @@ -4478,7 +4475,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) * be killed. */ atomic_set(&cgrp->css_kill_cnt, 1); - for_each_subsys(cgrp->root, ss) { + for_each_root_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; /* @@ -4552,7 +4549,7 @@ static void cgroup_offline_fn(struct work_struct *work) * css_tryget() is guaranteed to fail now. Tell subsystems to * initate destruction. */ - for_each_subsys(cgrp->root, ss) + for_each_root_subsys(cgrp->root, ss) offline_css(ss, cgrp); /* @@ -4562,7 +4559,7 @@ static void cgroup_offline_fn(struct work_struct *work) * whenever that may be, the extra dentry ref is put so that dentry * destruction happens only after all css's are released. */ - for_each_subsys(cgrp->root, ss) + for_each_root_subsys(cgrp->root, ss) css_put(cgrp->subsys[ss->subsys_id]); /* delete this cgroup from parent->children */ @@ -4967,7 +4964,7 @@ int proc_cgroup_show(struct seq_file *m, void *v) int count = 0; seq_printf(m, "%d:", root->hierarchy_id); - for_each_subsys(root, ss) + for_each_root_subsys(root, ss) seq_printf(m, "%s%s", count++ ? "," : "", ss->name); if (strlen(root->name)) seq_printf(m, "%sname=%s", count ? 
"," : "", -- cgit v0.10.2 From 82fe9b0da0d50e2795a49c268676fd132cbc3eea Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Jun 2013 11:53:37 -0700 Subject: cgroup: move init_css_set initialization inside cgroup_mutex cgroup_init() was doing init_css_set initialization outside cgroup_mutex, which is fine but we want to add lockdep annotation on subsystem iterations and cgroup_init() will trigger it spuriously. Move init_css_set initialization inside cgroup_mutex. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 605cb13..3409698 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4888,14 +4888,14 @@ int __init cgroup_init(void) cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]); } - /* Add init_css_set to the hash table */ - key = css_set_hash(init_css_set.subsys); - hash_add(css_set_table, &init_css_set.hlist, key); - /* allocate id for the dummy hierarchy */ mutex_lock(&cgroup_mutex); mutex_lock(&cgroup_root_mutex); + /* Add init_css_set to the hash table */ + key = css_set_hash(init_css_set.subsys); + hash_add(css_set_table, &init_css_set.hlist, key); + BUG_ON(cgroup_init_root_id(&cgroup_dummy_root)); mutex_unlock(&cgroup_root_mutex); -- cgit v0.10.2 From 30159ec7a9db7f3c91e2b27e66389c49302efd5c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Jun 2013 11:53:37 -0700 Subject: cgroup: implement for_each_[builtin_]subsys() There are quite a few places where all loaded [builtin] subsys are iterated. Implement for_each_[builtin_]subsys() and replace manual iterations with those to simplify those places a bit. The new iterators automatically skip NULL subsystems. This shouldn't cause any functional difference. Iteration loops which scan all subsystems and then skipping modular ones explicitly are converted to use for_each_builtin_subsys(). While at it, reorder variable declarations and adjust whitespaces a bit in the affected functions. v2: Add lockdep_assert_held() in for_each_subsys() and add comments about synchronization as suggested by Li. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3409698..cef6881 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -259,6 +259,31 @@ static int notify_on_release(const struct cgroup *cgrp) return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); } +/** + * for_each_subsys - iterate all loaded cgroup subsystems + * @ss: the iteration cursor + * @i: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end + * + * Should be called under cgroup_mutex. + */ +#define for_each_subsys(ss, i) \ + for ((i) = 0; (i) < CGROUP_SUBSYS_COUNT; (i)++) \ + if (({ lockdep_assert_held(&cgroup_mutex); \ + !((ss) = cgroup_subsys[i]); })) { } \ + else + +/** + * for_each_builtin_subsys - iterate all built-in cgroup subsystems + * @ss: the iteration cursor + * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end + * + * Bulit-in subsystems are always present and iteration itself doesn't + * require any synchronization. 
+ */ +#define for_each_builtin_subsys(ss, i) \ + for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT && \ + (((ss) = cgroup_subsys[i]) || true); (i)++) + /* iterate each subsystem attached to a hierarchy */ #define for_each_root_subsys(root, ss) \ list_for_each_entry((ss), &(root)->subsys_list, sibling) @@ -356,10 +381,11 @@ static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS); static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) { - int i; unsigned long key = 0UL; + struct cgroup_subsys *ss; + int i; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) + for_each_subsys(ss, i) key += (unsigned long)css[i]; key = (key >> 16) ^ key; @@ -514,6 +540,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, struct cgroup_subsys_state *template[]) { struct cgroupfs_root *root = cgrp->root; + struct cgroup_subsys *ss; struct css_set *cset; unsigned long key; int i; @@ -523,7 +550,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, * new css_set. while subsystems can change globally, the entries here * won't change, so no need for locking. */ - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + for_each_subsys(ss, i) { if (root->subsys_mask & (1UL << i)) { /* Subsystem is in this hierarchy. So we want * the subsystem state from the new @@ -982,23 +1009,19 @@ static int rebind_subsystems(struct cgroupfs_root *root, unsigned long added_mask, unsigned removed_mask) { struct cgroup *cgrp = &root->top_cgroup; + struct cgroup_subsys *ss; int i; BUG_ON(!mutex_is_locked(&cgroup_mutex)); BUG_ON(!mutex_is_locked(&cgroup_root_mutex)); /* Check that any added subsystems are currently free */ - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + for_each_subsys(ss, i) { unsigned long bit = 1UL << i; - struct cgroup_subsys *ss = cgroup_subsys[i]; + if (!(bit & added_mask)) continue; - /* - * Nobody should tell us to do a subsys that doesn't exist: - * parse_cgroupfs_options should catch that case and refcounts - * ensure that subsystems won't disappear once selected. 
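The new iterators above hide their skip logic in an `if (...) { } else` tail so that the macro still behaves like a single statement: NULL slots are stepped over transparently, and a stray `else` after the macro cannot bind to the hidden `if`. (The kernel version additionally slips the lockdep assertion into the condition via a `({ })` statement expression.) A user-space model of the trick, with made-up table contents:

    #include <stdio.h>

    static const char *table[4] = { "cpu", NULL, "memory", NULL };

    #define for_each_entry(p, i)                        \
            for ((i) = 0; (i) < 4; (i)++)               \
                    if (!((p) = table[i])) { }          \
                    else

    int main(void)
    {
            const char *p;
            int i;

            for_each_entry(p, i)
                    printf("%d: %s\n", i, p);   /* prints slots 0 and 2 */
            return 0;
    }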
- */ - BUG_ON(ss == NULL); + if (ss->root != &cgroup_dummy_root) { /* Subsystem isn't free */ return -EBUSY; @@ -1013,12 +1036,11 @@ static int rebind_subsystems(struct cgroupfs_root *root, return -EBUSY; /* Process each subsystem */ - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; + for_each_subsys(ss, i) { unsigned long bit = 1UL << i; + if (bit & added_mask) { /* We're binding this subsystem to this hierarchy */ - BUG_ON(ss == NULL); BUG_ON(cgrp->subsys[i]); BUG_ON(!cgroup_dummy_top->subsys[i]); BUG_ON(cgroup_dummy_top->subsys[i]->cgroup != cgroup_dummy_top); @@ -1034,7 +1056,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, root->subsys_mask |= bit; } else if (bit & removed_mask) { /* We're removing this subsystem */ - BUG_ON(ss == NULL); BUG_ON(cgrp->subsys[i] != cgroup_dummy_top->subsys[i]); BUG_ON(cgrp->subsys[i]->cgroup != cgrp); @@ -1050,7 +1071,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, root->subsys_mask &= ~bit; } else if (bit & root->subsys_mask) { /* Subsystem state should already exist */ - BUG_ON(ss == NULL); BUG_ON(!cgrp->subsys[i]); /* * a refcount was taken, but we already had one, so @@ -1117,8 +1137,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) char *token, *o = data; bool all_ss = false, one_ss = false; unsigned long mask = (unsigned long)-1; - int i; bool module_pin_failed = false; + struct cgroup_subsys *ss; + int i; BUG_ON(!mutex_is_locked(&cgroup_mutex)); @@ -1195,10 +1216,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) continue; } - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - if (ss == NULL) - continue; + for_each_subsys(ss, i) { if (strcmp(token, ss->name)) continue; if (ss->disabled) @@ -1221,16 +1239,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) * otherwise if 'none', 'name=' and a subsystem name options * were not specified, let's default to 'all' */ - if (all_ss || (!one_ss && !opts->none && !opts->name)) { - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - if (ss == NULL) - continue; - if (ss->disabled) - continue; - set_bit(i, &opts->subsys_mask); - } - } + if (all_ss || (!one_ss && !opts->none && !opts->name)) + for_each_subsys(ss, i) + if (!ss->disabled) + set_bit(i, &opts->subsys_mask); /* Consistency checks */ @@ -1274,10 +1286,8 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) * take duplicate reference counts on a subsystem that's already used, * but rebind_subsystems handles this case. 
*/ - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - unsigned long bit = 1UL << i; - - if (!(bit & opts->subsys_mask)) + for_each_subsys(ss, i) { + if (!(opts->subsys_mask & (1UL << i))) continue; if (!try_module_get(cgroup_subsys[i]->module)) { module_pin_failed = true; @@ -1306,11 +1316,11 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) static void drop_parsed_module_refcounts(unsigned long subsys_mask) { + struct cgroup_subsys *ss; int i; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - unsigned long bit = 1UL << i; - if (!(bit & subsys_mask)) + for_each_subsys(ss, i) { + if (!(subsys_mask & (1UL << i))) continue; module_put(cgroup_subsys[i]->module); } @@ -4822,7 +4832,9 @@ EXPORT_SYMBOL_GPL(cgroup_unload_subsys); */ int __init cgroup_init_early(void) { + struct cgroup_subsys *ss; int i; + atomic_set(&init_css_set.refcount, 1); INIT_LIST_HEAD(&init_css_set.cgrp_links); INIT_LIST_HEAD(&init_css_set.tasks); @@ -4837,13 +4849,8 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - - /* at bootup time, we don't worry about modular subsystems */ - if (!ss || ss->module) - continue; - + /* at bootup time, we don't worry about modular subsystems */ + for_each_builtin_subsys(ss, i) { BUG_ON(!ss->name); BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); BUG_ON(!ss->css_alloc); @@ -4868,20 +4875,15 @@ int __init cgroup_init_early(void) */ int __init cgroup_init(void) { - int err; - int i; + struct cgroup_subsys *ss; unsigned long key; + int i, err; err = bdi_init(&cgroup_backing_dev_info); if (err) return err; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - - /* at bootup time, we don't worry about modular subsystems */ - if (!ss || ss->module) - continue; + for_each_builtin_subsys(ss, i) { if (!ss->early_init) cgroup_init_subsys(ss); if (ss->use_id) @@ -4990,6 +4992,7 @@ out: /* Display information about each subsystem and each hierarchy */ static int proc_cgroupstats_show(struct seq_file *m, void *v) { + struct cgroup_subsys *ss; int i; seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n"); @@ -4999,14 +5002,12 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v) * subsys/hierarchy state. */ mutex_lock(&cgroup_mutex); - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - if (ss == NULL) - continue; + + for_each_subsys(ss, i) seq_printf(m, "%s\t%d\t%d\t%d\n", ss->name, ss->root->hierarchy_id, ss->root->number_of_cgroups, !ss->disabled); - } + mutex_unlock(&cgroup_mutex); return 0; } @@ -5060,6 +5061,7 @@ void cgroup_fork(struct task_struct *child) */ void cgroup_post_fork(struct task_struct *child) { + struct cgroup_subsys *ss; int i; /* @@ -5096,12 +5098,9 @@ void cgroup_post_fork(struct task_struct *child) * of the array can be freed at module unload, so we * can't touch that. 
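The module-pinning loop in parse_cgroupfs_options() shown above follows a common acquire-all-or-unwind shape: take references in index order and, on failure, release exactly the ones already taken, in reverse. A stand-alone sketch with toy grab/drop helpers (not try_module_get/module_put):

    #include <stdbool.h>
    #include <stdio.h>

    static bool grab(int i) { return i != 3; }  /* pretend slot 3 fails */
    static void drop(int i) { printf("drop %d\n", i); }

    static bool grab_all(unsigned long mask, int count)
    {
            int i;

            for (i = 0; i < count; i++) {
                    if (!(mask & (1UL << i)))
                            continue;
                    if (!grab(i))
                            break;              /* unwind from here */
            }
            if (i == count)
                    return true;

            while (--i >= 0)                    /* release only what we took */
                    if (mask & (1UL << i))
                            drop(i);
            return false;
    }

    int main(void)
    {
            if (!grab_all(0xb, 5))  /* bits 0, 1, 3; bit 3 fails */
                    printf("failed\n");
            return 0;
    }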
*/ - for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - + for_each_builtin_subsys(ss, i) if (ss->fork) ss->fork(child); - } } } @@ -5142,6 +5141,7 @@ void cgroup_post_fork(struct task_struct *child) */ void cgroup_exit(struct task_struct *tsk, int run_callbacks) { + struct cgroup_subsys *ss; struct css_set *cset; int i; @@ -5167,13 +5167,12 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) * fork/exit callbacks are supported only for builtin * subsystems, see cgroup_post_fork() for details. */ - for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - + for_each_builtin_subsys(ss, i) { if (ss->exit) { struct cgroup *old_cgrp = rcu_dereference_raw(cset->subsys[i])->cgroup; struct cgroup *cgrp = task_cgroup(tsk, i); + ss->exit(cgrp, old_cgrp, tsk); } } @@ -5280,23 +5279,19 @@ static void cgroup_release_agent(struct work_struct *work) static int __init cgroup_disable(char *str) { - int i; + struct cgroup_subsys *ss; char *token; + int i; while ((token = strsep(&str, ",")) != NULL) { if (!*token) continue; - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { - struct cgroup_subsys *ss = cgroup_subsys[i]; - - /* - * cgroup_disable, being at boot time, can't - * know about module subsystems, so we don't - * worry about them. - */ - if (!ss || ss->module) - continue; + /* + * cgroup_disable, being at boot time, can't know about + * module subsystems, so we don't worry about them. + */ + for_each_builtin_subsys(ss, i) { if (!strcmp(token, ss->name)) { ss->disabled = 1; printk(KERN_INFO "Disabling %s control group" -- cgit v0.10.2 From fc76df706123602214da494ba98bccea83e2cfff Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Jun 2013 11:53:37 -0700 Subject: cgroup: reserve ID 0 for dummy_root and 1 for unified hierarchy Before 1a57423166 ("cgroup: make hierarchy_id use cyclic idr"), hierarchy IDs were allocated from 0. As the dummy hierarchy was always the one first initialized, it got assigned 0 and all other hierarchies from 1. The patch accidentally changed the minimum usable ID to 2. Let's restore ID 0 for dummy_root and while at it reserve 1 for unified hierarchy.
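The resulting ID layout is easy to sketch with plain idr calls. The following is an illustration only, not code from the patch; the helper name is made up, but idr_alloc_cyclic() is the real API used below, returning an ID in [start, end) with end == 0 meaning no upper bound:

/* illustrative sketch of the hierarchy ID ranges after this patch */
static int sketch_alloc_hierarchy_id(struct idr *idr, void *root, bool dummy)
{
	if (dummy)
		/* the dummy root must get exactly ID 0 */
		return idr_alloc_cyclic(idr, root, 0, 1, GFP_KERNEL);
	/* ID 1 stays unused, reserved for the future unified hierarchy */
	return idr_alloc_cyclic(idr, root, 2, 0, GFP_KERNEL);
}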
Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: stable@vger.kernel.org diff --git a/kernel/cgroup.c b/kernel/cgroup.c index cef6881..f9c99ab 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1425,14 +1425,15 @@ static void init_cgroup_root(struct cgroupfs_root *root) init_cgroup_housekeeping(cgrp); } -static int cgroup_init_root_id(struct cgroupfs_root *root) +static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) { int id; lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&cgroup_root_mutex); - id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 2, 0, GFP_KERNEL); + id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, + GFP_KERNEL); if (id < 0) return id; @@ -1635,7 +1636,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (ret) goto unlock_drop; - ret = cgroup_init_root_id(root); + /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ + ret = cgroup_init_root_id(root, 2, 0); if (ret) goto unlock_drop; @@ -4898,7 +4900,7 @@ int __init cgroup_init(void) key = css_set_hash(init_css_set.subsys); hash_add(css_set_table, &init_css_set.hlist, key); - BUG_ON(cgroup_init_root_id(&cgroup_dummy_root)); + BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1)); mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); -- cgit v0.10.2 From 1672d040709b789671c0502e7aac9d632c2f9175 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Jun 2013 18:04:54 -0700 Subject: cgroup: fix cgroupfs_root early destruction path cgroupfs_root used to have ->actual_subsys_mask in addition to ->subsys_mask. a8a648c4ac ("cgroup: remove cgroup->actual_subsys_mask") removed it, noting that the subsys_mask is essentially temporary and doesn't belong in cgroupfs_root; however, the patch made it impossible to tell whether a cgroupfs_root actually has the subsystems bound or just has the bits set, leading to the following BUG when trying to mount with subsystems which are already mounted elsewhere. kernel BUG at kernel/cgroup.c:1038! invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ... CPU: 1 PID: 7973 Comm: mount Tainted: G W 3.10.0-rc7-next-20130625-sasha-00011-g1c1dc0e #1105 task: ffff880fc0ae8000 ti: ffff880fc0b9a000 task.ti: ffff880fc0b9a000 RIP: 0010:[] [] rebind_subsystems+0x409/0x5f0 ... Call Trace: [] cgroup_kill_sb+0xff/0x210 [] deactivate_locked_super+0x4f/0x90 [] cgroup_mount+0x673/0x6e0 [] cpuset_mount+0xd9/0x110 [] mount_fs+0xb0/0x2d0 [] vfs_kern_mount+0xbd/0x180 [] do_new_mount+0x145/0x2c0 [] do_mount+0x356/0x3c0 [] SyS_mount+0xfd/0x140 [] tracesys+0xdd/0xe2 We still want rebind_subsystems() to take added/removed masks, so let's fix it by marking whether a cgroupfs_root has finished binding or not. Also, document what's going on around ->subsys_mask initialization so that similar mistakes aren't repeated. Signed-off-by: Tejun Heo Reported-by: Sasha Levin Acked-by: Li Zefan diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 4c1eceb..8e4fd5e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -276,6 +276,7 @@ enum { CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ + CGRP_ROOT_SUBSYS_BOUND = (1 << 3), /* subsystems finished binding */ }; /* diff --git a/kernel/cgroup.c b/kernel/cgroup.c index f9c99ab..e801ecf 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1086,6 +1086,12 @@ static int rebind_subsystems(struct cgroupfs_root *root, } } + /* + * Mark @root has finished binding subsystems.
@root->subsys_mask + * now matches the bound subsystems. + */ + root->flags |= CGRP_ROOT_SUBSYS_BOUND; + return 0; } @@ -1485,6 +1491,14 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) init_cgroup_root(root); + /* + * We need to set @root->subsys_mask now so that @root can be + * matched by cgroup_test_super() before it finishes + * initialization; otherwise, competing mounts with the same + * options may try to bind the same subsystems instead of waiting + * for the first one leading to unexpected mount errors. + * SUBSYS_BOUND will be set once actual binding is complete. + */ root->subsys_mask = opts->subsys_mask; root->flags = opts->flags; ida_init(&root->cgroup_ida); @@ -1734,9 +1748,11 @@ static void cgroup_kill_sb(struct super_block *sb) { mutex_lock(&cgroup_root_mutex); /* Rebind all subsystems back to the default hierarchy */ - ret = rebind_subsystems(root, 0, root->subsys_mask); - /* Shouldn't be able to fail ... */ - BUG_ON(ret); + if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { + ret = rebind_subsystems(root, 0, root->subsys_mask); + /* Shouldn't be able to fail ... */ + BUG_ON(ret); + } /* * Release all the links from cset_links to this hierarchy's -- cgit v0.10.2 From eb178d063324d9c30f673db3877b892a48ade21e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Jun 2013 18:05:21 -0700 Subject: cgroup: grab cgroup_mutex in drop_parsed_module_refcounts() This isn't strictly necessary as all subsystems specified in @subsys_mask are guaranteed to be pinned; however, it does spuriously trigger a lockdep warning. Let's grab cgroup_mutex around it. Signed-off-by: Tejun Heo Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e801ecf..2d3a132 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1325,11 +1325,11 @@ static void drop_parsed_module_refcounts(unsigned long subsys_mask) struct cgroup_subsys *ss; int i; - for_each_subsys(ss, i) { - if (!(subsys_mask & (1UL << i))) - continue; - module_put(cgroup_subsys[i]->module); - } + mutex_lock(&cgroup_mutex); + for_each_subsys(ss, i) + if (subsys_mask & (1UL << i)) + module_put(cgroup_subsys[i]->module); + mutex_unlock(&cgroup_mutex); } static int cgroup_remount(struct super_block *sb, int *flags, char *data) -- cgit v0.10.2 From 14611e51a57df10240817d8ada510842faf0ec51 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Jun 2013 11:48:32 -0700 Subject: cgroup: fix RCU accesses to task->cgroups task->cgroups is an RCU pointer pointing to struct css_set. A task switches to a different css_set on cgroup migration but a css_set doesn't change once created and its pointers to cgroup_subsys_states aren't RCU protected. task_subsys_state[_check]() is the macro to acquire css given a task and subsys_id pair. It RCU-dereferences task->cgroups->subsys[], not task->cgroups, so the RCU pointer task->cgroups ends up being dereferenced without read_barrier_depends() after it. It's broken. Fix it by introducing task_css_set[_check]() which does RCU-dereference on task->cgroups. task_subsys_state[_check]() is reimplemented to directly dereference ->subsys[] of the css_set returned from task_css_set[_check](). This removes some of the sparse RCU warnings in cgroup. v2: Fixed unbalanced parenthesis and there's no need to use rcu_dereference_raw() when !CONFIG_PROVE_RCU. Both spotted by Li.
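The bug is easiest to see with the two dereference shapes side by side. A minimal sketch, with locking and the extra lockdep conditions elided:

/* broken: task->cgroups is the RCU-protected pointer, but the read
 * barrier is applied one hop too late, to the ->subsys[] load */
css = rcu_dereference(task->cgroups->subsys[subsys_id]);

/* fixed: dereference task->cgroups under RCU first; ->subsys[] never
 * changes after the css_set is created, so a plain load suffices */
css = rcu_dereference(task->cgroups)->subsys[subsys_id];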
Signed-off-by: Tejun Heo Reported-by: Fengguang Wu Acked-by: Li Zefan Cc: stable@vger.kernel.org diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 8e4fd5e..ad3555b 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -635,22 +635,60 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state( return cgrp->subsys[subsys_id]; } -/* - * function to get the cgroup_subsys_state which allows for extra - * rcu_dereference_check() conditions, such as locks used during the - * cgroup_subsys::attach() methods. +/** + * task_css_set_check - obtain a task's css_set with extra access conditions + * @task: the task to obtain css_set for + * @__c: extra condition expression to be passed to rcu_dereference_check() + * + * A task's css_set is RCU protected, initialized and exited while holding + * task_lock(), and can only be modified while holding both cgroup_mutex + * and task_lock() while the task is alive. This macro verifies that the + * caller is inside proper critical section and returns @task's css_set. + * + * The caller can also specify additional allowed conditions via @__c, such + * as locks used during the cgroup_subsys::attach() methods. */ #ifdef CONFIG_PROVE_RCU extern struct mutex cgroup_mutex; -#define task_subsys_state_check(task, subsys_id, __c) \ - rcu_dereference_check((task)->cgroups->subsys[(subsys_id)], \ - lockdep_is_held(&(task)->alloc_lock) || \ - lockdep_is_held(&cgroup_mutex) || (__c)) +#define task_css_set_check(task, __c) \ + rcu_dereference_check((task)->cgroups, \ + lockdep_is_held(&(task)->alloc_lock) || \ + lockdep_is_held(&cgroup_mutex) || (__c)) #else -#define task_subsys_state_check(task, subsys_id, __c) \ - rcu_dereference((task)->cgroups->subsys[(subsys_id)]) +#define task_css_set_check(task, __c) \ + rcu_dereference((task)->cgroups) #endif +/** + * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds + * @task: the target task + * @subsys_id: the target subsystem ID + * @__c: extra condition expression to be passed to rcu_dereference_check() + * + * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The + * synchronization rules are the same as task_css_set_check(). + */ +#define task_subsys_state_check(task, subsys_id, __c) \ + task_css_set_check((task), (__c))->subsys[(subsys_id)] + +/** + * task_css_set - obtain a task's css_set + * @task: the task to obtain css_set for + * + * See task_css_set_check(). + */ +static inline struct css_set *task_css_set(struct task_struct *task) +{ + return task_css_set_check(task, false); +} + +/** + * task_subsys_state - obtain css for (task, subsys) + * @task: the target task + * @subsys_id: the target subsystem ID + * + * See task_subsys_state_check(). + */ static inline struct cgroup_subsys_state * task_subsys_state(struct task_struct *task, int subsys_id) { -- cgit v0.10.2 From a8ad805cfde00be8fe3b3dae8890996dbeb91e2c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 21 Jun 2013 15:52:04 -0700 Subject: cgroup: fix RCU accesses around task->cgroups There are several places in kernel/cgroup.c where task->cgroups is accessed and modified without going through proper RCU accessors. None is broken as they're all lock protected accesses; however, this still triggers sparse RCU address space warnings. * Consistently use task_css_set() for task->cgroups dereferencing. * Use RCU_INIT_POINTER() to clear task->cgroups to &init_css_set on exit. * Remove unnecessary rcu_dereference_raw() from cset->subsys[] dereference in cgroup_exit(). 
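A sketch of the writer-side rule these conversions follow (illustrative variable, not kernel code): rcu_assign_pointer() is needed when publishing a value that concurrent readers may pick up, while RCU_INIT_POINTER() suffices when no ordering is required, such as pointing back at an always-valid static object:

struct css_set __rcu *cgroups_ptr;

/* migration publishes a freshly built css_set; readers may follow it
 * immediately, so the release barrier of rcu_assign_pointer() is needed */
rcu_assign_pointer(cgroups_ptr, new_cset);

/* exit resets to the statically allocated init_css_set, which has been
 * visible since boot; no ordering is needed */
RCU_INIT_POINTER(cgroups_ptr, &init_css_set);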
Signed-off-by: Tejun Heo Reported-by: Fengguang Wu Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2d3a132..ee9f0c1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -724,7 +724,7 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, * task can't change groups, so the only thing that can happen * is that it exits and its css is set back to init_css_set. */ - cset = task->cgroups; + cset = task_css_set(task); if (cset == &init_css_set) { res = &root->top_cgroup; } else { @@ -1971,7 +1971,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, * css_set to init_css_set and dropping the old one. */ WARN_ON_ONCE(tsk->flags & PF_EXITING); - old_cset = tsk->cgroups; + old_cset = task_css_set(tsk); task_lock(tsk); rcu_assign_pointer(tsk->cgroups, new_cset); @@ -2094,8 +2094,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, * we use find_css_set, which allocates a new one if necessary. */ for (i = 0; i < group_size; i++) { + struct css_set *old_cset; + tc = flex_array_get(group, i); - tc->cg = find_css_set(tc->task->cgroups, cgrp); + old_cset = task_css_set(tc->task); + tc->cg = find_css_set(old_cset, cgrp); if (!tc->cg) { retval = -ENOMEM; goto out_put_css_set_refs; } @@ -3012,7 +3015,7 @@ static void cgroup_enable_task_cg_lists(void) * entry won't be deleted though the process has exited. */ if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) - list_add(&p->cg_list, &p->cgroups->tasks); + list_add(&p->cg_list, &task_css_set(p)->tasks); task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); @@ -5061,8 +5064,8 @@ static const struct file_operations proc_cgroupstats_operations = { void cgroup_fork(struct task_struct *child) { task_lock(current); + get_css_set(task_css_set(current)); child->cgroups = current->cgroups; - get_css_set(child->cgroups); task_unlock(current); INIT_LIST_HEAD(&child->cg_list); } @@ -5097,7 +5100,7 @@ void cgroup_post_fork(struct task_struct *child) write_lock(&css_set_lock); task_lock(child); if (list_empty(&child->cg_list)) - list_add(&child->cg_list, &task_css_set(child)->tasks); task_unlock(child); write_unlock(&css_set_lock); } @@ -5177,8 +5180,8 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) /* Reassign the task to the init_css_set. */ task_lock(tsk); - cset = tsk->cgroups; - tsk->cgroups = &init_css_set; + cset = task_css_set(tsk); + RCU_INIT_POINTER(tsk->cgroups, &init_css_set); if (run_callbacks && need_forkexit_callback) { /* @@ -5187,8 +5190,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) */ for_each_builtin_subsys(ss, i) { if (ss->exit) { struct cgroup *old_cgrp = - rcu_dereference_raw(cset->subsys[i])->cgroup; + struct cgroup *old_cgrp = cset->subsys[i]->cgroup; struct cgroup *cgrp = task_cgroup(tsk, i); ss->exit(cgrp, old_cgrp, tsk); @@ -5555,7 +5557,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cgrp, u64 count; rcu_read_lock(); - count = atomic_read(&current->cgroups->refcount); + count = atomic_read(&task_css_set(current)->refcount); rcu_read_unlock(); return count; } -- cgit v0.10.2 From a4ea1cc90604df08d471ae84eb9627319d10c844 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 21 Jun 2013 15:52:33 -0700 Subject: cgroup: always use RCU accessors for protected accesses kernel/cgroup.c still has places where an RCU pointer is set and accessed directly without going through RCU_INIT_POINTER() or rcu_dereference_protected().
They're all properly protected accesses, so nothing is broken, but they lead to spurious sparse RCU address space warnings. Substitute direct accesses with RCU_INIT_POINTER() and rcu_dereference_protected(). Note that %true is specified as the extra condition for all dereference updates. This isn't ideal, as all it does is suppress warnings without actually policing synchronization rules; however, most are scheduled to be removed pretty soon along with css_id itself, so no reason to be more elaborate. Combined with the previous changes, this removes all RCU-related sparse warnings from cgroup. Signed-off-by: Tejun Heo Reported-by: Fengguang Wu Acked-by: Li Zefan diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ee9f0c1..4ed8677 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1427,7 +1427,7 @@ static void init_cgroup_root(struct cgroupfs_root *root) INIT_LIST_HEAD(&root->root_list); root->number_of_cgroups = 1; cgrp->root = root; - cgrp->name = &root_cgroup_name; + RCU_INIT_POINTER(cgrp->name, &root_cgroup_name); init_cgroup_housekeeping(cgrp); } @@ -2558,7 +2558,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, return ret; } - old_name = cgrp->name; + old_name = rcu_dereference_protected(cgrp->name, true); rcu_assign_pointer(cgrp->name, name); kfree_rcu(old_name, rcu_head); @@ -4177,13 +4177,15 @@ static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files, /* This cgroup is ready now */ for_each_root_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; + struct css_id *id = rcu_dereference_protected(css->id, true); + /* * Update id->css pointer and make this css visible from * CSS ID functions. This pointer will be dereferened * from RCU-read-side without locks. */ - if (css->id) - rcu_assign_pointer(css->id->css, css); + if (id) + rcu_assign_pointer(id->css, css); } return 0; @@ -4863,7 +4865,7 @@ int __init cgroup_init_early(void) css_set_count = 1; init_cgroup_root(&cgroup_dummy_root); cgroup_root_count = 1; - init_task.cgroups = &init_css_set; + RCU_INIT_POINTER(init_task.cgroups, &init_css_set); init_cgrp_cset_link.cset = &init_css_set; init_cgrp_cset_link.cgrp = cgroup_dummy_top; @@ -5380,7 +5382,8 @@ bool css_is_ancestor(struct cgroup_subsys_state *child, void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) { - struct css_id *id = css->id; + struct css_id *id = rcu_dereference_protected(css->id, true); + /* When this is called before css_id initialization, id can be NULL */ if (!id) return; @@ -5446,8 +5449,8 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss, return PTR_ERR(newid); newid->stack[0] = newid->id; - newid->css = rootcss; - rootcss->id = newid; + RCU_INIT_POINTER(newid->css, rootcss); + RCU_INIT_POINTER(rootcss->id, newid); return 0; } @@ -5461,7 +5464,7 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent, subsys_id = ss->subsys_id; parent_css = parent->subsys[subsys_id]; child_css = child->subsys[subsys_id]; - parent_id = parent_css->id; + parent_id = rcu_dereference_protected(parent_css->id, true); depth = parent_id->depth + 1; child_id = get_new_cssid(ss, depth); -- cgit v0.10.2 From e2bd416f6246d11be29999c177d2534943a5c2df Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 27 Jun 2013 19:37:23 -0700 Subject: cgroup: fix deadlock on cgroup_mutex via drop_parsed_module_refcounts() eb178d06332 ("cgroup: grab cgroup_mutex in drop_parsed_module_refcounts()") made drop_parsed_module_refcounts() grab cgroup_mutex to make
lockdep assertion in for_each_subsys() happy. Unfortunately, cgroup_remount() calls the function while holding cgroup_mutex in its failure path, leading to the following deadlock. # mount -t cgroup -o remount,memory,blkio cgroup blkio cgroup: option changes via remount are deprecated (pid=525 comm=mount) ============================================= [ INFO: possible recursive locking detected ] 3.10.0-rc4-work+ #1 Not tainted --------------------------------------------- mount/525 is trying to acquire lock: (cgroup_mutex){+.+.+.}, at: [] drop_parsed_module_refcounts+0x21/0xb0 but task is already holding lock: (cgroup_mutex){+.+.+.}, at: [] cgroup_remount+0x51/0x200 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(cgroup_mutex); lock(cgroup_mutex); *** DEADLOCK *** May be due to missing lock nesting notation 4 locks held by mount/525: #0: (&type->s_umount_key#30){+.+...}, at: [] do_mount+0x2bd/0xa30 #1: (&sb->s_type->i_mutex_key#9){+.+.+.}, at: [] cgroup_remount+0x43/0x200 #2: (cgroup_mutex){+.+.+.}, at: [] cgroup_remount+0x51/0x200 #3: (cgroup_root_mutex){+.+.+.}, at: [] cgroup_remount+0x5f/0x200 stack backtrace: CPU: 2 PID: 525 Comm: mount Not tainted 3.10.0-rc4-work+ #1 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 ffffffff829651f0 ffff88000ec2fc28 ffffffff81c24bb1 ffff88000ec2fce8 ffffffff810f420d 0000000000000006 0000000000000001 0000000000000056 ffff8800153b4640 ffff880000000000 ffffffff81c2e468 ffff8800153b4640 Call Trace: [] dump_stack+0x19/0x1b [] __lock_acquire+0x15dd/0x1e60 [] lock_acquire+0x9c/0x1f0 [] mutex_lock_nested+0x65/0x410 [] drop_parsed_module_refcounts+0x21/0xb0 [] cgroup_remount+0x1ae/0x200 [] do_remount_sb+0x82/0x190 [] do_mount+0x5f1/0xa30 [] SyS_mount+0x83/0xc0 [] system_call_fastpath+0x16/0x1b Fix it by moving the drop_parsed_module_refcounts() invocation outside cgroup_mutex. Signed-off-by: Tejun Heo diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 4ed8677..1b7b567 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1365,7 +1365,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) if (opts.flags != root->flags || (opts.name && strcmp(opts.name, root->name))) { ret = -EINVAL; - drop_parsed_module_refcounts(opts.subsys_mask); goto out_unlock; } @@ -1380,7 +1379,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) if (ret) { /* rebind_subsystems failed, re-populate the removed files */ cgroup_populate_dir(cgrp, false, removed_mask); - drop_parsed_module_refcounts(opts.subsys_mask); goto out_unlock; } @@ -1395,6 +1393,8 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); mutex_unlock(&cgrp->dentry->d_inode->i_mutex); + if (ret) + drop_parsed_module_refcounts(opts.subsys_mask); return ret; } -- cgit v0.10.2 From 0ce6cba35777cf96a54ce0d5856dc962566b8717 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 27 Jun 2013 19:37:26 -0700 Subject: cgroup: CGRP_ROOT_SUBSYS_BOUND should be ignored when comparing mount options 1672d04070 ("cgroup: fix cgroupfs_root early destruction path") introduced CGRP_ROOT_SUBSYS_BOUND which is used to mark completion of subsys binding on a new root; however, this broke remounts. cgroup_remount() doesn't allow changing root options via remount and CGRP_ROOT_SUBSYS_BOUND, which is set on all fully initialized roots, makes the function reject all remounts. Fix it by putting the options part in the lower 16 bits of root->flags and masking the comparisons.
While at it, make cgroup_remount() emit an error message explaining why it's rejecting a remount request, so that it's less of a mystery. Signed-off-by: Tejun Heo diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ad3555b..8db5397 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -276,7 +276,11 @@ enum { CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ - CGRP_ROOT_SUBSYS_BOUND = (1 << 3), /* subsystems finished binding */ + + /* mount options live below bit 16 */ + CGRP_ROOT_OPTION_MASK = (1 << 16) - 1, + + CGRP_ROOT_SUBSYS_BOUND = (1 << 16), /* subsystems finished binding */ }; /* diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1b7b567..5a2fcf5 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1362,8 +1362,11 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) removed_mask = root->subsys_mask & ~opts.subsys_mask; /* Don't allow flags or name to change at remount */ - if (opts.flags != root->flags || + if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || (opts.name && strcmp(opts.name, root->name))) { + pr_err("cgroup: option or name mismatch, new: 0x%lx \"%s\", old: 0x%lx \"%s\"\n", + opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "", + root->flags & CGRP_ROOT_OPTION_MASK, root->name); ret = -EINVAL; goto out_unlock; } -- cgit v0.10.2 From c7ba8287cd11f2fc9e2feee9e1fac34b7293658f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 29 Jun 2013 14:06:10 -0700 Subject: cgroup: CGRP_ROOT_SUBSYS_BOUND should also be ignored when mounting an existing hierarchy 0ce6cba357 ("cgroup: CGRP_ROOT_SUBSYS_BOUND should be ignored when comparing mount options") only updated the remount path but CGRP_ROOT_SUBSYS_BOUND should also be ignored when comparing options while mounting an existing hierarchy. As an option mismatch triggers a warning but doesn't fail the mount without sane_behavior, this only results in a spurious warning message. Fix it by only comparing CGRP_ROOT_OPTION_MASK bits when comparing new and existing root options. Signed-off-by: Tejun Heo diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5a2fcf5..e5583d1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1703,7 +1703,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, */ cgroup_free_root(opts.new_root); - if (root->flags != opts.flags) { + if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) { if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); ret = -EINVAL; -- cgit v0.10.2
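The last two patches share one small idiom worth spelling out: XOR two flag words to get the set of differing bits, then mask away everything that is not a user-visible mount option. A standalone sketch, with made-up names mirroring CGRP_ROOT_OPTION_MASK:

#define SKETCH_OPTION_MASK	((1UL << 16) - 1)	/* options live below bit 16 */
#define SKETCH_SUBSYS_BOUND	(1UL << 16)		/* internal state, ignored */

/* non-zero iff the user-visible options differ; internal bits such as
 * SKETCH_SUBSYS_BOUND can never affect the result */
static unsigned long sketch_options_differ(unsigned long a, unsigned long b)
{
	return (a ^ b) & SKETCH_OPTION_MASK;
}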