/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1UL << (BITS_PER_LONG - 1)) - 1),
 * i.e. one bit less than an atomic_long_t - this is because of the way
 * shutdown works, see percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
 * the initial ref with percpu_ref_put().
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() must be called precisely once, so if teardown can
 * be initiated from more than one place, the caller needs its own mechanism
 * to synchronize it.
 */
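
/*
 * A minimal lifecycle sketch. The "foo" type, foo_release() and the error
 * handling below are hypothetical and only illustrate the calling pattern;
 * see fs/aio.c for real usage.
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);	// free the percpu counter
 *		kfree(foo);
 *	}
 *
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	if (percpu_ref_init(&foo->ref, foo_release, GFP_KERNEL)) {
 *		kfree(foo);
 *		return -ENOMEM;
 *	}
 *
 *	// normal operation: cheap per-cpu get/put
 *	percpu_ref_get(&foo->ref);
 *	percpu_ref_put(&foo->ref);
 *
 *	// shutdown: percpu_ref_kill() switches the ref to atomic mode and
 *	// drops the initial ref; foo_release() runs once the count hits zero
 *	percpu_ref_kill(&foo->ref);
 */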

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	return percpu_ref_kill_and_confirm(ref, NULL);
}
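
/*
 * Illustrative sketch (hypothetical names; assumes foo carries an initialized
 * struct completion): using the confirm_kill callback of
 * percpu_ref_kill_and_confirm() to wait until percpu_ref_tryget_live() is
 * guaranteed to fail.
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *	// no new refs via percpu_ref_tryget_live() from this point on
 */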

/*
 * Internal helper. Don't use outside percpu-refcount proper. The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);

	/* paired with smp_store_release() in percpu_ref_reinit() */
	smp_read_barrier_depends();

	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_inc(*percpu_count);
	else
		atomic_long_inc(&ref->count);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	int ret;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
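
/*
 * Note the contrast with percpu_ref_tryget_live() below: percpu_ref_tryget()
 * can still succeed after percpu_ref_kill() as long as the count hasn't
 * reached zero, so it suits paths that may legitimately run while shutdown
 * is in progress. A hypothetical sketch (foo and do_something() are
 * illustrative only):
 *
 *	if (percpu_ref_tryget(&foo->ref)) {
 *		do_something(foo);
 *		percpu_ref_put(&foo->ref);
 *	}
 */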

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed. Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail. For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used. After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	int ret = false;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	}

	rcu_read_unlock_sched();

	return ret;
}
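
/*
 * Illustrative sketch in the style of the lookup_ioctx() example described at
 * the top of this file: refusing new users once shutdown has begun. The idr
 * lookup and the assumption that "foo" is freed only after an RCU grace
 * period are hypothetical, not part of this API.
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;		// already being torn down
 *	rcu_read_unlock();
 */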

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_dec(*percpu_count);
	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}

#endif