diff options
author | Jakub Kicinski <jakub.kicinski@netronome.com> | 2019-03-29 20:08:53 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2019-04-29 02:29:21 -0400 |
commit | b92e793bbe4a1c49dbf78d8d526561e7a7dd568a (patch) | |
tree | 981f93f26cadc142d2b2280f1d9d8f60a9890e1c /kernel/jump_label.c | |
parent | ad282a8117d5048398f506f20b092c14b3b3c43f (diff) |
locking/static_key: Factor out the fast path of static_key_slow_dec()
static_key_slow_dec() checks if the atomic enable count is larger
than 1, and if so, it decrements it before taking the jump_label_lock.
Move this logic into a helper for reuse in rate-limited keys.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: alexei.starovoitov@gmail.com
Cc: ard.biesheuvel@linaro.org
Cc: oss-drivers@netronome.com
Cc: yamada.masahiro@socionext.com
Link: https://lkml.kernel.org/r/20190330000854.30142-3-jakub.kicinski@netronome.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/jump_label.c')
-rw-r--r-- | kernel/jump_label.c | 23 |
1 files changed, 15 insertions, 8 deletions
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 73bbbaddbd9c..02c3d11264dd 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -202,13 +202,13 @@ void static_key_disable(struct static_key *key) | |||
202 | } | 202 | } |
203 | EXPORT_SYMBOL_GPL(static_key_disable); | 203 | EXPORT_SYMBOL_GPL(static_key_disable); |
204 | 204 | ||
205 | static void __static_key_slow_dec_cpuslocked(struct static_key *key, | 205 | static bool static_key_slow_try_dec(struct static_key *key) |
206 | unsigned long rate_limit, | ||
207 | struct delayed_work *work) | ||
208 | { | 206 | { |
209 | int val; | 207 | int val; |
210 | 208 | ||
211 | lockdep_assert_cpus_held(); | 209 | val = atomic_fetch_add_unless(&key->enabled, -1, 1); |
210 | if (val == 1) | ||
211 | return false; | ||
212 | 212 | ||
213 | /* | 213 | /* |
214 | * The negative count check is valid even when a negative | 214 | * The negative count check is valid even when a negative |
@@ -217,11 +217,18 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key, | |||
217 | * returns is unbalanced, because all other static_key_slow_inc() | 217 | * returns is unbalanced, because all other static_key_slow_inc() |
218 | * instances block while the update is in progress. | 218 | * instances block while the update is in progress. |
219 | */ | 219 | */ |
220 | val = atomic_fetch_add_unless(&key->enabled, -1, 1); | 220 | WARN(val < 0, "jump label: negative count!\n"); |
221 | if (val != 1) { | 221 | return true; |
222 | WARN(val < 0, "jump label: negative count!\n"); | 222 | } |
223 | |||
224 | static void __static_key_slow_dec_cpuslocked(struct static_key *key, | ||
225 | unsigned long rate_limit, | ||
226 | struct delayed_work *work) | ||
227 | { | ||
228 | lockdep_assert_cpus_held(); | ||
229 | |||
230 | if (static_key_slow_try_dec(key)) | ||
223 | return; | 231 | return; |
224 | } | ||
225 | 232 | ||
226 | jump_label_lock(); | 233 | jump_label_lock(); |
227 | if (atomic_dec_and_test(&key->enabled)) { | 234 | if (atomic_dec_and_test(&key->enabled)) { |