Diffstat (limited to 'kernel/jump_label.c')
-rw-r--r--  kernel/jump_label.c | 36
1 files changed, 33 insertions, 3 deletions
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..4b353e0be121 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+	int v, v1;
+
 	STATIC_KEY_CHECK_USE();
-	if (atomic_inc_not_zero(&key->enabled))
-		return;
+
+	/*
+	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * later calls must wait for the first one to _finish_ the
+	 * jump_label_update() process. At the same time, however,
+	 * the jump_label_update() call below wants to see
+	 * static_key_enabled(&key) for jumps to be updated properly.
+	 *
+	 * So give a special meaning to negative key->enabled: it sends
+	 * static_key_slow_inc() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update(). Note that
+	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 */
+	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+		if (likely(v1 == v))
+			return;
+	}
 
 	jump_label_lock();
-	if (atomic_inc_return(&key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0) {
+		atomic_set(&key->enabled, -1);
 		jump_label_update(key);
+		atomic_set(&key->enabled, 1);
+	} else {
+		atomic_inc(&key->enabled);
+	}
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
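
For readers who want to experiment with the pattern outside the kernel, below is a minimal userspace sketch of the fixed static_key_slow_inc() logic. It is not kernel code: struct fake_key, fake_key_update() and fake_key_mutex are made-up stand-ins, C11 atomics replace atomic_t/atomic_cmpxchg(), and a pthread mutex replaces jump_label_mutex; the real jump_label_update() patches branch sites rather than printing. The point it illustrates is the one the new comment describes: the count is only incremented lock-free while it is strictly positive, and the first enabler publishes -1 so that concurrent callers fall into the locked slow path until the update has finished.

/*
 * Minimal userspace analogue of the fixed static_key_slow_inc().
 * All names here (fake_key, fake_key_update, fake_key_mutex) are
 * hypothetical; this is a sketch, not the kernel implementation.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_key {
	atomic_int enabled;	/* < 0: enable in progress, > 0: enabled */
};

static pthread_mutex_t fake_key_mutex = PTHREAD_MUTEX_INITIALIZER;

static void fake_key_update(struct fake_key *key)
{
	/* Stand-in for jump_label_update(), which patches branch sites. */
	printf("updating key, enabled=%d\n", atomic_load(&key->enabled));
}

static void fake_key_slow_inc(struct fake_key *key)
{
	int v = atomic_load(&key->enabled);

	/*
	 * Fast path: bump the count only while it is strictly positive.
	 * Zero (never enabled) and -1 (first enable still in progress)
	 * both fall through to the locked slow path below.
	 */
	while (v > 0) {
		if (atomic_compare_exchange_weak(&key->enabled, &v, v + 1))
			return;
		/* v was reloaded by the failed compare-exchange; retry. */
	}

	pthread_mutex_lock(&fake_key_mutex);
	if (atomic_load(&key->enabled) == 0) {
		/* Publish -1: "counts as enabled, but update not finished". */
		atomic_store(&key->enabled, -1);
		fake_key_update(key);
		atomic_store(&key->enabled, 1);
	} else {
		atomic_fetch_add(&key->enabled, 1);
	}
	pthread_mutex_unlock(&fake_key_mutex);
}

int main(void)
{
	struct fake_key key = { .enabled = 0 };

	fake_key_slow_inc(&key);	/* slow path: runs the update */
	fake_key_slow_inc(&key);	/* lock-free fast path */
	printf("final enabled=%d\n", atomic_load(&key.enabled));
	return 0;
}

Build with something like cc -std=c11 -pthread; the second call takes the fast path because enabled is already 1 by then.
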
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
+	/*
+	 * The negative count check is valid even when a negative
+	 * key->enabled is in use by static_key_slow_inc(); a
+	 * __static_key_slow_dec() before the first static_key_slow_inc()
+	 * returns is unbalanced, because all other static_key_slow_inc()
+	 * instances block while the update is in progress.
+	 */
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
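
The second hunk only adds a comment: the existing WARN() already catches a key->enabled that has gone negative, and the new text explains why that check stays correct while static_key_slow_inc() temporarily stores -1. As a purely illustrative counterpart to the sketch above, a matching decrement might look like the following. fake_key_slow_dec() is hypothetical, reuses the fake_key definitions from the earlier block, and collapses what the kernel does with atomic_dec_and_mutex_lock() into an explicit fast path plus a locked 1 -> 0 transition.

/* Illustrative decrement for the fake_key sketch above (hypothetical). */
static void fake_key_slow_dec(struct fake_key *key)
{
	int v = atomic_load(&key->enabled);

	/* Fast path: drop a reference while at least one will remain. */
	while (v > 1) {
		if (atomic_compare_exchange_weak(&key->enabled, &v, v - 1))
			return;
	}

	/*
	 * Possible 1 -> 0 transition (or an unbalanced call): serialize
	 * with the enable path before deciding whether to run the update.
	 */
	pthread_mutex_lock(&fake_key_mutex);
	if (atomic_fetch_sub(&key->enabled, 1) == 1)
		fake_key_update(key);		/* count really hit zero */
	else if (atomic_load(&key->enabled) < 0)
		fprintf(stderr, "fake key: negative count!\n");
	pthread_mutex_unlock(&fake_key_mutex);
}

In the kernel, atomic_dec_and_mutex_lock() performs the decrement and the mutex acquisition in one helper, which is why __static_key_slow_dec() can check for a negative count immediately after that helper reports that the count did not reach zero.
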