author    Linus Torvalds <torvalds@linux-foundation.org>  2016-06-25 09:14:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-06-25 09:14:44 -0400
commit    e3b22bc3d705b4a265247a9e2a1dea9ecf01a0cd
tree      834f54242f3296b5874110bbe20bde4b04f6e0fc
parent    2de23071f5f7eb80a6cccf45438b271da81246af
parent    4c5ea0a9cd02d6aa8adc86e100b2a4cff8d614ff
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fix from Thomas Gleixner:
"A single fix to address a race in the static key logic"
* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/static_key: Fix concurrent static_key_slow_inc()
-rw-r--r--  include/linux/jump_label.h  16
-rw-r--r--  kernel/jump_label.c         36
2 files changed, 46 insertions(+), 6 deletions(-)
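To see the race this merge closes: before the patch, static_key_slow_inc() took its fast path as soon as key->enabled was non-zero, so a second caller could return while the first caller was still patching the branch sites inside jump_label_update(). Below is a minimal userspace model of the old logic using C11 atomics and pthreads; model_key, model_update() and the other names are illustrative stand-ins, not kernel API (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct model_key { atomic_int enabled; };

static struct model_key key;
static atomic_bool patch_done;	/* "have the jump sites been rewritten?" */

/* Stand-in for jump_label_update(): code patching takes a while. */
static void model_update(void)
{
	usleep(1000);
	atomic_store(&patch_done, true);
}

/* The pre-fix logic, open-coding atomic_inc_not_zero(). */
static void old_slow_inc(void)
{
	int v = atomic_load(&key.enabled);

	while (v != 0) {
		/* Fast path: any non-zero count is taken to mean
		 * "already patched", which is the bug. */
		if (atomic_compare_exchange_weak(&key.enabled, &v, v + 1))
			return;
	}

	/* Slow path (the real code holds jump_label_mutex here). */
	if (atomic_fetch_add(&key.enabled, 1) + 1 == 1)
		model_update();
}

static void *second_caller(void *unused)
{
	(void)unused;
	old_slow_inc();
	if (!atomic_load(&patch_done))
		printf("RACE: inc returned before the jump sites were patched\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* First caller is mid-slow-path: enabled already reads 1, but
	 * model_update() (the patching step) has not run yet. */
	atomic_store(&key.enabled, 1);

	pthread_create(&t, NULL, second_caller, NULL);
	pthread_join(t, NULL);

	model_update();	/* first caller finishes patching only now */
	return 0;
}

With the fix applied, the second caller sees neither 0 nor the transient -1 as an invitation to the fast path; it blocks on jump_label_lock() until the first enable has finished patching.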
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0536524bb9eb..68904469fba1 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -117,13 +117,18 @@ struct module;
 
 #include <linux/atomic.h>
 
+#ifdef HAVE_JUMP_LABEL
+
 static inline int static_key_count(struct static_key *key)
 {
-	return atomic_read(&key->enabled);
+	/*
+	 * -1 means the first static_key_slow_inc() is in progress.
+	 * static_key_enabled() must return true, so return 1 here.
+	 */
+	int n = atomic_read(&key->enabled);
+	return n >= 0 ? n : 1;
 }
 
-#ifdef HAVE_JUMP_LABEL
-
 #define JUMP_TYPE_FALSE		0UL
 #define JUMP_TYPE_TRUE		1UL
 #define JUMP_TYPE_MASK		1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
 
 #else  /* !HAVE_JUMP_LABEL */
 
+static inline int static_key_count(struct static_key *key)
+{
+	return atomic_read(&key->enabled);
+}
+
 static __always_inline void jump_label_init(void)
 {
 	static_key_initialized = true;
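With the hunk above applied, a transient value of -1 in key->enabled reads as "enabled". A standalone sketch of just that accessor, with struct static_key stubbed out for illustration (this is not the kernel's definition of the struct):

#include <assert.h>
#include <stdatomic.h>

/* Stubbed struct for illustration only. */
struct static_key { atomic_int enabled; };

static inline int static_key_count(struct static_key *key)
{
	/* -1 (first static_key_slow_inc() in flight) must read as enabled. */
	int n = atomic_load(&key->enabled);
	return n >= 0 ? n : 1;
}

int main(void)
{
	struct static_key key = { .enabled = -1 };

	assert(static_key_count(&key) == 1);	/* transient -1 counts as on */
	atomic_store(&key.enabled, 0);
	assert(static_key_count(&key) == 0);	/* plain off */
	atomic_store(&key.enabled, 3);
	assert(static_key_count(&key) == 3);	/* nested enables */
	return 0;
}

The !HAVE_JUMP_LABEL copy added in the second hunk keeps the plain atomic_read(): without runtime code patching there is no transient -1 state to paper over.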
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..4b353e0be121 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+	int v, v1;
+
 	STATIC_KEY_CHECK_USE();
-	if (atomic_inc_not_zero(&key->enabled))
-		return;
+
+	/*
+	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * later calls must wait for the first one to _finish_ the
+	 * jump_label_update() process.  At the same time, however,
+	 * the jump_label_update() call below wants to see
+	 * static_key_enabled(&key) for jumps to be updated properly.
+	 *
+	 * So give a special meaning to negative key->enabled: it sends
+	 * static_key_slow_inc() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update().  Note that
+	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 */
+	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+		if (likely(v1 == v))
+			return;
+	}
 
 	jump_label_lock();
-	if (atomic_inc_return(&key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0) {
+		atomic_set(&key->enabled, -1);
 		jump_label_update(key);
+		atomic_set(&key->enabled, 1);
+	} else {
+		atomic_inc(&key->enabled);
+	}
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
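The fast-path loop above is effectively an "increment only while strictly positive" primitive: 0 must fall through to the locked slow path so the first enabler actually patches the branch sites, and -1 must wait for that patching to finish. A standalone C11 sketch of the same pattern (illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

/* Increment *enabled only while it is strictly positive; 0 and -1 both
 * fall through so the caller takes the locked slow path instead. */
static bool inc_if_positive(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	while (v > 0) {
		/* On failure, compare_exchange reloads v and we retry. */
		if (atomic_compare_exchange_weak(enabled, &v, v + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int enabled = 2;

	return inc_if_positive(&enabled) ? 0 : 1;	/* 2 -> 3, fast path */
}

This is also why the kernel's atomic_inc_unless_negative() could not be used directly, as the in-code comment notes: it accepts 0 as well (its check is >= 0), which would let the very first enable skip the patching step entirely.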
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
+	/*
+	 * The negative count check is valid even when a negative
+	 * key->enabled is in use by static_key_slow_inc(); a
+	 * __static_key_slow_dec() before the first static_key_slow_inc()
+	 * returns is unbalanced, because all other static_key_slow_inc()
+	 * instances block while the update is in progress.
+	 */
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");