| author | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:48 -0400 |
| --- | --- | --- |
| committer | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:48 -0400 |
| commit | 6251f9976af7656b6970a8820153f356430f5de2 (patch) | |
| tree | aa2d83279dfb125b9bbd88083a32bfa5e99bf3da /lib | |
| parent | a2237370194484ee6aeeff04b617e4b14d178966 (diff) | |
percpu_ref: minor code and comment updates
* Some comments became stale. Updated.
* percpu_ref_tryget() unnecessarily initializes @ret. Removed.
* A blank line removed from percpu_ref_kill_rcu().
* Explicit function name in a WARN format string replaced with __func__.
* WARN_ON() in percpu_ref_reinit() converted to WARN_ON_ONCE().
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'lib')

| File | Lines changed |
| --- | --- |
| lib/percpu-refcount.c (-rw-r--r--) | 14 |

1 file changed, 6 insertions, 8 deletions
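The __func__ substitution called out in the commit message is a general C pattern rather than anything specific to percpu_ref. The sketch below is a rough illustration in plain userspace GNU C, not kernel code: warn_once() is a made-up stand-in for the kernel's WARN_ONCE(), and kill_demo() is a made-up caller. It only shows why a "%s"/__func__ pair stays correct if the function is later renamed, whereas a hard-coded name silently goes stale.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's WARN_ONCE(): report only the first time cond is true. */
#define warn_once(cond, fmt, ...)					\
	({								\
		static bool __warned;					\
		bool __cond = (cond);					\
		if (__cond && !__warned) {				\
			__warned = true;				\
			fprintf(stderr, fmt "\n", ##__VA_ARGS__);	\
		}							\
		__cond;							\
	})

/* Made-up caller standing in for percpu_ref_kill_and_confirm(). */
static void kill_demo(bool already_dead)
{
	/*
	 * With "%s"/__func__ the message stays correct if the function is
	 * ever renamed; the old code hard-coded "percpu_ref_kill()".
	 */
	warn_once(already_dead, "%s called more than once!", __func__);
}

int main(void)
{
	kill_demo(false);	/* no warning */
	kill_demo(true);	/* warns once */
	kill_demo(true);	/* suppressed by the static flag */
	return 0;
}
```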
```diff
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 070dab5e7d77..8ef3f5c20df6 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -108,7 +108,6 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	 * reaching 0 before we add the percpu counts. But doing it at the same
 	 * time is equivalent and saves us atomic operations:
 	 */
-
 	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
 
 	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
@@ -120,8 +119,8 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	ref->confirm_kill(ref);
 
 	/*
-	 * Now we're in single atomic_t mode with a consistent refcount, so it's
-	 * safe to drop our initial ref:
+	 * Now we're in single atomic_long_t mode with a consistent
+	 * refcount, so it's safe to drop our initial ref:
 	 */
 	percpu_ref_put(ref);
 }
@@ -134,8 +133,8 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
  * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
  * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
  * called after @ref is seen as dead from all CPUs - all further
- * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
- * for more details.
+ * invocations of percpu_ref_tryget_live() will fail. See
+ * percpu_ref_tryget_live() for more details.
  *
  * Due to the way percpu_ref is implemented, @confirm_kill will be called
  * after at least one full RCU grace period has passed but this is an
@@ -145,8 +144,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
 	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
-		  "percpu_ref_kill() called more than once on %pf!",
-		  ref->release);
+		  "%s called more than once on %pf!", __func__, ref->release);
 
 	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
@@ -172,7 +170,7 @@ void percpu_ref_reinit(struct percpu_ref *ref)
 	int cpu;
 
 	BUG_ON(!pcpu_count);
-	WARN_ON(!percpu_ref_is_zero(ref));
+	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
 	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
```
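The atomic_long_add() line in the first hunk is only touched for whitespace here, but it is the heart of how percpu_ref collapses from percpu mode to atomic mode. The sketch below is a simplified userspace model under assumed names (fake_ref, COUNT_BIAS, NR_FAKE_CPUS, fake_ref_*); it is not the kernel implementation, which relies on RCU and real per-CPU allocations, but it shows the same idea: the shared counter starts at bias + 1, per-CPU deltas accumulate separately, and at kill time the per-CPU sum minus the bias is folded in with one atomic add, leaving just the live reference count.

```c
#include <stdatomic.h>
#include <stdio.h>

#define NR_FAKE_CPUS	4
#define COUNT_BIAS	(1L << 30)	/* stand-in for the kernel's PCPU_COUNT_BIAS */

struct fake_ref {
	atomic_long count;		/* shared counter, biased while in "percpu" mode */
	long pcpu_count[NR_FAKE_CPUS];	/* per-CPU get/put deltas */
};

static void fake_ref_init(struct fake_ref *ref)
{
	/* The bias keeps the shared counter well above zero while percpu deltas are outstanding. */
	atomic_store(&ref->count, COUNT_BIAS + 1);	/* bias + the initial reference */
	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		ref->pcpu_count[cpu] = 0;
}

/* While in "percpu" mode, gets and puts touch only the caller's CPU slot. */
static void fake_ref_get(struct fake_ref *ref, int cpu) { ref->pcpu_count[cpu]++; }
static void fake_ref_put(struct fake_ref *ref, int cpu) { ref->pcpu_count[cpu]--; }

/* Collapse to atomic mode, mirroring what percpu_ref_kill_rcu() does after the grace period. */
static void fake_ref_kill(struct fake_ref *ref)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		sum += ref->pcpu_count[cpu];

	/* Fold in the per-CPU total and remove the bias in a single atomic add. */
	atomic_fetch_add(&ref->count, sum - COUNT_BIAS);

	if (atomic_load(&ref->count) <= 0)
		fprintf(stderr, "refcount hit zero before kill - caller bug\n");
}

int main(void)
{
	struct fake_ref ref;

	fake_ref_init(&ref);
	fake_ref_get(&ref, 1);	/* +1 on "CPU" 1 */
	fake_ref_put(&ref, 3);	/* -1 on "CPU" 3 */
	fake_ref_kill(&ref);

	/* Only the initial reference remains, so the collapsed count is 1. */
	printf("count after kill: %ld\n", atomic_load(&ref.count));
	return 0;
}
```

With one get and one put outstanding on different "CPUs", the model prints `count after kill: 1`: only the initial reference survives the collapse, which is exactly the condition the WARN_ONCE(count <= 0) check in the first hunk is guarding.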