| author | Tejun Heo <tj@kernel.org> | 2014-05-09 15:11:53 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2014-05-09 15:42:35 -0400 |
| commit | 4fb6e25049cb6fa0accc7f1b7c192b952fad7ac8 | |
| tree | 6be13f79be6f0bb6dc51ca10431c9737af8aee92 | |
| parent | 2070d50e1cbe3d7f157cbf8e63279c893f375d7f | |
percpu-refcount: implement percpu_ref_tryget()
Implement percpu_ref_tryget(), which fails only if the refcnt has already
reached zero. Note that this differs from the recently renamed
percpu_ref_tryget_live(), which fails once the refcnt has been killed and
is merely draining its remaining references. percpu_ref_tryget() succeeds
on a killed refcnt as long as its current count is above zero.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Kent Overstreet <kmo@daterainc.com>
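To illustrate the distinction from the caller's side, here is a minimal sketch (the struct my_obj and helper names are hypothetical and not part of this patch): percpu_ref_tryget() only needs the count to be non-zero, while percpu_ref_tryget_live() additionally requires that the ref has not been killed.

```c
#include <linux/percpu-refcount.h>

/* Hypothetical object embedding a percpu_ref; not part of this patch. */
struct my_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

/*
 * Succeeds as long as the count is above zero, even after
 * percpu_ref_kill() has started draining the remaining references.
 * The caller must ensure @obj itself stays accessible while trying.
 */
static bool my_obj_get_any(struct my_obj *obj)
{
	return percpu_ref_tryget(&obj->ref);
}

/* Fails once the ref has been killed, even if the count is still non-zero. */
static bool my_obj_get_if_live(struct my_obj *obj)
{
	return percpu_ref_tryget_live(&obj->ref);
}
```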
-rw-r--r--	include/linux/percpu-refcount.h | 32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index e22d15597cc3..dba35c411e8c 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -118,6 +118,36 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 }
 
 /**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+	int ret = false;
+
+	rcu_read_lock_sched();
+
+	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+		__this_cpu_inc(*pcpu_count);
+		ret = true;
+	} else {
+		ret = atomic_inc_not_zero(&ref->count);
+	}
+
+	rcu_read_unlock_sched();
+
+	return ret;
+}
+
+/**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
  *
@@ -128,6 +158,8 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
  * used. After the confirm_kill callback is invoked, it's guaranteed that
  * no new reference will be given out by percpu_ref_tryget().
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
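Continuing the hypothetical struct my_obj sketch above, a rough lifecycle illustration (assuming the two-argument percpu_ref_init() plus percpu_ref_kill()/percpu_ref_put() API as of this series; my_obj_release and my_obj_example are made-up names): after percpu_ref_kill() the ref is marked dead, so the percpu fast path is skipped and percpu_ref_tryget() falls back to atomic_inc_not_zero(), which keeps succeeding until the remaining references actually drain to zero.

```c
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

/* Hypothetical release callback; runs once the last reference is dropped. */
static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	kfree(obj);
}

/* Rough lifecycle sketch, not part of this patch. */
static void my_obj_example(struct my_obj *obj)
{
	if (percpu_ref_init(&obj->ref, my_obj_release))
		return;

	/* ... hand out and drop references via percpu_ref_get()/put() ... */

	percpu_ref_kill(&obj->ref);	/* mark dead and start draining */

	/*
	 * percpu_ref_tryget_live() fails from this point on, but
	 * percpu_ref_tryget() still succeeds while the count is above
	 * zero, i.e. until the remaining references are dropped.
	 */
	if (percpu_ref_tryget(&obj->ref))
		percpu_ref_put(&obj->ref);
}
```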
