author Johannes Weiner <hannes@cmpxchg.org> 2014-12-10 23:42:42 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org> 2014-12-11 01:41:05 (GMT)
commit e8ea14cc6eadfe2ea63e9989e16e62625a2619f8 (patch)
tree 8109a731e199928c2fd87d3633a56a3251d85245 /include
parent 5ac8fb31ad2ebd6492d1c5e8f31846b532f03945 (diff)
mm: memcontrol: take a css reference for each charged page
Charges currently pin the css indirectly by playing tricks during css_offline(): user pages stall the offlining process until all of them have been reparented, whereas kmemcg acquires a keep-alive reference if outstanding kernel pages are detected at that point.

In preparation for removing all this complexity, make the pinning explicit and acquire a css reference for every charged page.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
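The pairing this makes explicit: a charge path pins the css once per page with css_get_many(), and the matching uncharge drops the whole batch with css_put_many(). A minimal sketch of that pattern, not taken from the patch itself: try_charge_pages() and uncharge_pages() are hypothetical names, and the only assumption about struct mem_cgroup is its embedded css member, which the helpers added below operate on.

/* Hypothetical sketch of the charge/uncharge pairing this patch enables. */
static int try_charge_pages(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	/* ... reserve nr_pages against the memcg limits here ... */

	/* pin the css explicitly: one reference per charged page */
	css_get_many(&memcg->css, nr_pages);
	return 0;
}

static void uncharge_pages(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	/* ... return the page charges here ... */

	/* drop the per-page references taken at charge time */
	css_put_many(&memcg->css, nr_pages);
}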
Diffstat (limited to 'include')
-rw-r--r-- include/linux/cgroup.h | 26
-rw-r--r-- include/linux/percpu-refcount.h | 47
2 files changed, 64 insertions, 9 deletions
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 1d51968..9f96b25 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css)
}
/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+
+/**
* css_tryget - try to obtain a reference on the specified css
* @css: target css
*
@@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css)
percpu_ref_put(&css->refcnt);
}
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
+
/* bits in struct cgroup flags field */
enum {
/* Control Group requires release notifications to userspace */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 51ce60c..530b249 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -147,28 +147,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
}
/**
- * percpu_ref_get - increment a percpu refcount
+ * percpu_ref_get_many - increment a percpu refcount
* @ref: percpu_ref to get
+ * @nr: number of references to get
*
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_add().
*
* This function is safe to call as long as @ref is between init and exit.
*/
-static inline void percpu_ref_get(struct percpu_ref *ref)
+static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count))
- this_cpu_inc(*percpu_count);
+ this_cpu_add(*percpu_count, nr);
else
- atomic_long_inc(&ref->count);
+ atomic_long_add(nr, &ref->count);
rcu_read_unlock_sched();
}
/**
+ * percpu_ref_get - increment a percpu refcount
+ * @ref: percpu_ref to get
+ *
+ * Analogous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get(struct percpu_ref *ref)
+{
+ percpu_ref_get_many(ref, 1);
+}
+
+/**
* percpu_ref_tryget - try to increment a percpu refcount
* @ref: percpu_ref to try-get
*
@@ -231,29 +245,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
}
/**
- * percpu_ref_put - decrement a percpu refcount
+ * percpu_ref_put_many - decrement a percpu refcount
* @ref: percpu_ref to put
+ * @nr: number of references to put
*
* Decrement the refcount, and if 0, call the release function (which was passed
* to percpu_ref_init())
*
* This function is safe to call as long as @ref is between init and exit.
*/
-static inline void percpu_ref_put(struct percpu_ref *ref)
+static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &percpu_count))
- this_cpu_dec(*percpu_count);
- else if (unlikely(atomic_long_dec_and_test(&ref->count)))
+ this_cpu_sub(*percpu_count, nr);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
ref->release(ref);
rcu_read_unlock_sched();
}
/**
+ * percpu_ref_put - decrement a percpu refcount
+ * @ref: percpu_ref to put
+ *
+ * Decrement the refcount, and if 0, call the release function (which was passed
+ * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_put(struct percpu_ref *ref)
+{
+ percpu_ref_put_many(ref, 1);
+}
+
+/**
* percpu_ref_is_zero - test whether a percpu refcount reached zero
* @ref: percpu_ref to test
*
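On the percpu-refcount side, the _many variants fold n gets or puts into a single this_cpu_add()/atomic add rather than n separate increments, which is what keeps per-page css pinning cheap. A standalone sketch of the pairing, not from the patch: my_ref, my_release(), pin_pages(), and unpin_pages() are hypothetical, and the ref is assumed to have been set up with percpu_ref_init() during initialization.

/* Hypothetical standalone use of the batched helpers added above. */
static void my_release(struct percpu_ref *ref)
{
	/* called once the count drops to zero; free the enclosing object */
}

static struct percpu_ref my_ref;	/* percpu_ref_init(&my_ref, my_release, 0, GFP_KERNEL) at setup */

static void pin_pages(unsigned long nr)
{
	/* one percpu/atomic add covers nr references */
	percpu_ref_get_many(&my_ref, nr);
}

static void unpin_pages(unsigned long nr)
{
	/* my_release() runs if this put drops the count to zero */
	percpu_ref_put_many(&my_ref, nr);
}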