diff options
author | Ingo Molnar <mingo@elte.hu> | 2012-02-24 02:31:31 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2012-02-24 04:05:59 -0500 |
commit | c5905afb0ee6550b42c49213da1c22d67316c194 (patch) | |
tree | 253fdb322e6e5b257ffda3b9b66bce90a473a6f7 /kernel | |
parent | 1cfa60dc7d7c7cc774a44eee47ff135a644a1f31 (diff) |
static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
So here's a boot tested patch on top of Jason's series that does
all the cleanups I talked about and turns jump labels into a
more intuitive to use facility. It should also address the
various misconceptions and confusions that surround jump labels.
Typical usage scenarios:
#include <linux/static_key.h>
struct static_key key = STATIC_KEY_INIT_TRUE;
if (static_key_false(&key))
do unlikely code
else
do likely code
Or:
if (static_key_true(&key))
do likely code
else
do unlikely code
The static key is modified via:
static_key_slow_inc(&key);
...
static_key_slow_dec(&key);
The 'slow' prefix makes it abundantly clear that this is an
expensive operation.
I've updated all in-kernel code to use this everywhere. Note
that I have (intentionally) not pushed the rename blindly
through to the lowest levels: the actual jump-label
patching arch facility should be named like that, so we want to
decouple jump labels from the static-key facility a bit.
On non-jump-label enabled architectures static keys default to
likely()/unlikely() branches.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: a.p.zijlstra@chello.nl
Cc: mathieu.desnoyers@efficios.com
Cc: davem@davemloft.net
Cc: ddaney.cavm@gmail.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/events/core.c | 16 | ||||
-rw-r--r-- | kernel/jump_label.c | 128 | ||||
-rw-r--r-- | kernel/sched/core.c | 18 | ||||
-rw-r--r-- | kernel/sched/fair.c | 8 | ||||
-rw-r--r-- | kernel/sched/sched.h | 14 | ||||
-rw-r--r-- | kernel/tracepoint.c | 20 |
6 files changed, 112 insertions, 92 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index 7c3b9de55f6b..5e0f8bb89b2b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -128,7 +128,7 @@ enum event_type_t { | |||
128 | * perf_sched_events : >0 events exist | 128 | * perf_sched_events : >0 events exist |
129 | * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu | 129 | * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu |
130 | */ | 130 | */ |
131 | struct jump_label_key_deferred perf_sched_events __read_mostly; | 131 | struct static_key_deferred perf_sched_events __read_mostly; |
132 | static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); | 132 | static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); |
133 | 133 | ||
134 | static atomic_t nr_mmap_events __read_mostly; | 134 | static atomic_t nr_mmap_events __read_mostly; |
@@ -2769,7 +2769,7 @@ static void free_event(struct perf_event *event) | |||
2769 | 2769 | ||
2770 | if (!event->parent) { | 2770 | if (!event->parent) { |
2771 | if (event->attach_state & PERF_ATTACH_TASK) | 2771 | if (event->attach_state & PERF_ATTACH_TASK) |
2772 | jump_label_dec_deferred(&perf_sched_events); | 2772 | static_key_slow_dec_deferred(&perf_sched_events); |
2773 | if (event->attr.mmap || event->attr.mmap_data) | 2773 | if (event->attr.mmap || event->attr.mmap_data) |
2774 | atomic_dec(&nr_mmap_events); | 2774 | atomic_dec(&nr_mmap_events); |
2775 | if (event->attr.comm) | 2775 | if (event->attr.comm) |
@@ -2780,7 +2780,7 @@ static void free_event(struct perf_event *event) | |||
2780 | put_callchain_buffers(); | 2780 | put_callchain_buffers(); |
2781 | if (is_cgroup_event(event)) { | 2781 | if (is_cgroup_event(event)) { |
2782 | atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); | 2782 | atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); |
2783 | jump_label_dec_deferred(&perf_sched_events); | 2783 | static_key_slow_dec_deferred(&perf_sched_events); |
2784 | } | 2784 | } |
2785 | } | 2785 | } |
2786 | 2786 | ||
@@ -4982,7 +4982,7 @@ fail: | |||
4982 | return err; | 4982 | return err; |
4983 | } | 4983 | } |
4984 | 4984 | ||
4985 | struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 4985 | struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
4986 | 4986 | ||
4987 | static void sw_perf_event_destroy(struct perf_event *event) | 4987 | static void sw_perf_event_destroy(struct perf_event *event) |
4988 | { | 4988 | { |
@@ -4990,7 +4990,7 @@ static void sw_perf_event_destroy(struct perf_event *event) | |||
4990 | 4990 | ||
4991 | WARN_ON(event->parent); | 4991 | WARN_ON(event->parent); |
4992 | 4992 | ||
4993 | jump_label_dec(&perf_swevent_enabled[event_id]); | 4993 | static_key_slow_dec(&perf_swevent_enabled[event_id]); |
4994 | swevent_hlist_put(event); | 4994 | swevent_hlist_put(event); |
4995 | } | 4995 | } |
4996 | 4996 | ||
@@ -5020,7 +5020,7 @@ static int perf_swevent_init(struct perf_event *event) | |||
5020 | if (err) | 5020 | if (err) |
5021 | return err; | 5021 | return err; |
5022 | 5022 | ||
5023 | jump_label_inc(&perf_swevent_enabled[event_id]); | 5023 | static_key_slow_inc(&perf_swevent_enabled[event_id]); |
5024 | event->destroy = sw_perf_event_destroy; | 5024 | event->destroy = sw_perf_event_destroy; |
5025 | } | 5025 | } |
5026 | 5026 | ||
@@ -5843,7 +5843,7 @@ done: | |||
5843 | 5843 | ||
5844 | if (!event->parent) { | 5844 | if (!event->parent) { |
5845 | if (event->attach_state & PERF_ATTACH_TASK) | 5845 | if (event->attach_state & PERF_ATTACH_TASK) |
5846 | jump_label_inc(&perf_sched_events.key); | 5846 | static_key_slow_inc(&perf_sched_events.key); |
5847 | if (event->attr.mmap || event->attr.mmap_data) | 5847 | if (event->attr.mmap || event->attr.mmap_data) |
5848 | atomic_inc(&nr_mmap_events); | 5848 | atomic_inc(&nr_mmap_events); |
5849 | if (event->attr.comm) | 5849 | if (event->attr.comm) |
@@ -6081,7 +6081,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
6081 | * - that may need work on context switch | 6081 | * - that may need work on context switch |
6082 | */ | 6082 | */ |
6083 | atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); | 6083 | atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); |
6084 | jump_label_inc(&perf_sched_events.key); | 6084 | static_key_slow_inc(&perf_sched_events.key); |
6085 | } | 6085 | } |
6086 | 6086 | ||
6087 | /* | 6087 | /* |
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 543782e7cdd2..bf9dcadbb53a 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/sort.h> | 13 | #include <linux/sort.h> |
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/jump_label.h> | 15 | #include <linux/static_key.h> |
16 | 16 | ||
17 | #ifdef HAVE_JUMP_LABEL | 17 | #ifdef HAVE_JUMP_LABEL |
18 | 18 | ||
@@ -29,10 +29,11 @@ void jump_label_unlock(void) | |||
29 | mutex_unlock(&jump_label_mutex); | 29 | mutex_unlock(&jump_label_mutex); |
30 | } | 30 | } |
31 | 31 | ||
32 | bool jump_label_enabled(struct jump_label_key *key) | 32 | bool static_key_enabled(struct static_key *key) |
33 | { | 33 | { |
34 | return !!atomic_read(&key->enabled); | 34 | return (atomic_read(&key->enabled) > 0); |
35 | } | 35 | } |
36 | EXPORT_SYMBOL_GPL(static_key_enabled); | ||
36 | 37 | ||
37 | static int jump_label_cmp(const void *a, const void *b) | 38 | static int jump_label_cmp(const void *a, const void *b) |
38 | { | 39 | { |
@@ -58,22 +59,26 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) | |||
58 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); | 59 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); |
59 | } | 60 | } |
60 | 61 | ||
61 | static void jump_label_update(struct jump_label_key *key, int enable); | 62 | static void jump_label_update(struct static_key *key, int enable); |
62 | 63 | ||
63 | void jump_label_inc(struct jump_label_key *key) | 64 | void static_key_slow_inc(struct static_key *key) |
64 | { | 65 | { |
65 | if (atomic_inc_not_zero(&key->enabled)) | 66 | if (atomic_inc_not_zero(&key->enabled)) |
66 | return; | 67 | return; |
67 | 68 | ||
68 | jump_label_lock(); | 69 | jump_label_lock(); |
69 | if (atomic_read(&key->enabled) == 0) | 70 | if (atomic_read(&key->enabled) == 0) { |
70 | jump_label_update(key, JUMP_LABEL_ENABLE); | 71 | if (!jump_label_get_branch_default(key)) |
72 | jump_label_update(key, JUMP_LABEL_ENABLE); | ||
73 | else | ||
74 | jump_label_update(key, JUMP_LABEL_DISABLE); | ||
75 | } | ||
71 | atomic_inc(&key->enabled); | 76 | atomic_inc(&key->enabled); |
72 | jump_label_unlock(); | 77 | jump_label_unlock(); |
73 | } | 78 | } |
74 | EXPORT_SYMBOL_GPL(jump_label_inc); | 79 | EXPORT_SYMBOL_GPL(static_key_slow_inc); |
75 | 80 | ||
76 | static void __jump_label_dec(struct jump_label_key *key, | 81 | static void __static_key_slow_dec(struct static_key *key, |
77 | unsigned long rate_limit, struct delayed_work *work) | 82 | unsigned long rate_limit, struct delayed_work *work) |
78 | { | 83 | { |
79 | if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { | 84 | if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { |
@@ -85,32 +90,35 @@ static void __jump_label_dec(struct jump_label_key *key, | |||
85 | if (rate_limit) { | 90 | if (rate_limit) { |
86 | atomic_inc(&key->enabled); | 91 | atomic_inc(&key->enabled); |
87 | schedule_delayed_work(work, rate_limit); | 92 | schedule_delayed_work(work, rate_limit); |
88 | } else | 93 | } else { |
89 | jump_label_update(key, JUMP_LABEL_DISABLE); | 94 | if (!jump_label_get_branch_default(key)) |
90 | 95 | jump_label_update(key, JUMP_LABEL_DISABLE); | |
96 | else | ||
97 | jump_label_update(key, JUMP_LABEL_ENABLE); | ||
98 | } | ||
91 | jump_label_unlock(); | 99 | jump_label_unlock(); |
92 | } | 100 | } |
93 | EXPORT_SYMBOL_GPL(jump_label_dec); | ||
94 | 101 | ||
95 | static void jump_label_update_timeout(struct work_struct *work) | 102 | static void jump_label_update_timeout(struct work_struct *work) |
96 | { | 103 | { |
97 | struct jump_label_key_deferred *key = | 104 | struct static_key_deferred *key = |
98 | container_of(work, struct jump_label_key_deferred, work.work); | 105 | container_of(work, struct static_key_deferred, work.work); |
99 | __jump_label_dec(&key->key, 0, NULL); | 106 | __static_key_slow_dec(&key->key, 0, NULL); |
100 | } | 107 | } |
101 | 108 | ||
102 | void jump_label_dec(struct jump_label_key *key) | 109 | void static_key_slow_dec(struct static_key *key) |
103 | { | 110 | { |
104 | __jump_label_dec(key, 0, NULL); | 111 | __static_key_slow_dec(key, 0, NULL); |
105 | } | 112 | } |
113 | EXPORT_SYMBOL_GPL(static_key_slow_dec); | ||
106 | 114 | ||
107 | void jump_label_dec_deferred(struct jump_label_key_deferred *key) | 115 | void static_key_slow_dec_deferred(struct static_key_deferred *key) |
108 | { | 116 | { |
109 | __jump_label_dec(&key->key, key->timeout, &key->work); | 117 | __static_key_slow_dec(&key->key, key->timeout, &key->work); |
110 | } | 118 | } |
119 | EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); | ||
111 | 120 | ||
112 | 121 | void jump_label_rate_limit(struct static_key_deferred *key, | |
113 | void jump_label_rate_limit(struct jump_label_key_deferred *key, | ||
114 | unsigned long rl) | 122 | unsigned long rl) |
115 | { | 123 | { |
116 | key->timeout = rl; | 124 | key->timeout = rl; |
@@ -153,7 +161,7 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry | |||
153 | arch_jump_label_transform(entry, type); | 161 | arch_jump_label_transform(entry, type); |
154 | } | 162 | } |
155 | 163 | ||
156 | static void __jump_label_update(struct jump_label_key *key, | 164 | static void __jump_label_update(struct static_key *key, |
157 | struct jump_entry *entry, | 165 | struct jump_entry *entry, |
158 | struct jump_entry *stop, int enable) | 166 | struct jump_entry *stop, int enable) |
159 | { | 167 | { |
@@ -170,27 +178,40 @@ static void __jump_label_update(struct jump_label_key *key, | |||
170 | } | 178 | } |
171 | } | 179 | } |
172 | 180 | ||
181 | static enum jump_label_type jump_label_type(struct static_key *key) | ||
182 | { | ||
183 | bool true_branch = jump_label_get_branch_default(key); | ||
184 | bool state = static_key_enabled(key); | ||
185 | |||
186 | if ((!true_branch && state) || (true_branch && !state)) | ||
187 | return JUMP_LABEL_ENABLE; | ||
188 | |||
189 | return JUMP_LABEL_DISABLE; | ||
190 | } | ||
191 | |||
173 | void __init jump_label_init(void) | 192 | void __init jump_label_init(void) |
174 | { | 193 | { |
175 | struct jump_entry *iter_start = __start___jump_table; | 194 | struct jump_entry *iter_start = __start___jump_table; |
176 | struct jump_entry *iter_stop = __stop___jump_table; | 195 | struct jump_entry *iter_stop = __stop___jump_table; |
177 | struct jump_label_key *key = NULL; | 196 | struct static_key *key = NULL; |
178 | struct jump_entry *iter; | 197 | struct jump_entry *iter; |
179 | 198 | ||
180 | jump_label_lock(); | 199 | jump_label_lock(); |
181 | jump_label_sort_entries(iter_start, iter_stop); | 200 | jump_label_sort_entries(iter_start, iter_stop); |
182 | 201 | ||
183 | for (iter = iter_start; iter < iter_stop; iter++) { | 202 | for (iter = iter_start; iter < iter_stop; iter++) { |
184 | struct jump_label_key *iterk; | 203 | struct static_key *iterk; |
185 | 204 | ||
186 | iterk = (struct jump_label_key *)(unsigned long)iter->key; | 205 | iterk = (struct static_key *)(unsigned long)iter->key; |
187 | arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ? | 206 | arch_jump_label_transform_static(iter, jump_label_type(iterk)); |
188 | JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE); | ||
189 | if (iterk == key) | 207 | if (iterk == key) |
190 | continue; | 208 | continue; |
191 | 209 | ||
192 | key = iterk; | 210 | key = iterk; |
193 | key->entries = iter; | 211 | /* |
212 | * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. | ||
213 | */ | ||
214 | *((unsigned long *)&key->entries) += (unsigned long)iter; | ||
194 | #ifdef CONFIG_MODULES | 215 | #ifdef CONFIG_MODULES |
195 | key->next = NULL; | 216 | key->next = NULL; |
196 | #endif | 217 | #endif |
@@ -200,8 +221,8 @@ void __init jump_label_init(void) | |||
200 | 221 | ||
201 | #ifdef CONFIG_MODULES | 222 | #ifdef CONFIG_MODULES |
202 | 223 | ||
203 | struct jump_label_mod { | 224 | struct static_key_mod { |
204 | struct jump_label_mod *next; | 225 | struct static_key_mod *next; |
205 | struct jump_entry *entries; | 226 | struct jump_entry *entries; |
206 | struct module *mod; | 227 | struct module *mod; |
207 | }; | 228 | }; |
@@ -221,9 +242,9 @@ static int __jump_label_mod_text_reserved(void *start, void *end) | |||
221 | start, end); | 242 | start, end); |
222 | } | 243 | } |
223 | 244 | ||
224 | static void __jump_label_mod_update(struct jump_label_key *key, int enable) | 245 | static void __jump_label_mod_update(struct static_key *key, int enable) |
225 | { | 246 | { |
226 | struct jump_label_mod *mod = key->next; | 247 | struct static_key_mod *mod = key->next; |
227 | 248 | ||
228 | while (mod) { | 249 | while (mod) { |
229 | struct module *m = mod->mod; | 250 | struct module *m = mod->mod; |
@@ -254,11 +275,7 @@ void jump_label_apply_nops(struct module *mod) | |||
254 | return; | 275 | return; |
255 | 276 | ||
256 | for (iter = iter_start; iter < iter_stop; iter++) { | 277 | for (iter = iter_start; iter < iter_stop; iter++) { |
257 | struct jump_label_key *iterk; | 278 | arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE); |
258 | |||
259 | iterk = (struct jump_label_key *)(unsigned long)iter->key; | ||
260 | arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ? | ||
261 | JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE); | ||
262 | } | 279 | } |
263 | } | 280 | } |
264 | 281 | ||
@@ -267,8 +284,8 @@ static int jump_label_add_module(struct module *mod) | |||
267 | struct jump_entry *iter_start = mod->jump_entries; | 284 | struct jump_entry *iter_start = mod->jump_entries; |
268 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; | 285 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
269 | struct jump_entry *iter; | 286 | struct jump_entry *iter; |
270 | struct jump_label_key *key = NULL; | 287 | struct static_key *key = NULL; |
271 | struct jump_label_mod *jlm; | 288 | struct static_key_mod *jlm; |
272 | 289 | ||
273 | /* if the module doesn't have jump label entries, just return */ | 290 | /* if the module doesn't have jump label entries, just return */ |
274 | if (iter_start == iter_stop) | 291 | if (iter_start == iter_stop) |
@@ -277,28 +294,30 @@ static int jump_label_add_module(struct module *mod) | |||
277 | jump_label_sort_entries(iter_start, iter_stop); | 294 | jump_label_sort_entries(iter_start, iter_stop); |
278 | 295 | ||
279 | for (iter = iter_start; iter < iter_stop; iter++) { | 296 | for (iter = iter_start; iter < iter_stop; iter++) { |
280 | if (iter->key == (jump_label_t)(unsigned long)key) | 297 | struct static_key *iterk; |
281 | continue; | ||
282 | 298 | ||
283 | key = (struct jump_label_key *)(unsigned long)iter->key; | 299 | iterk = (struct static_key *)(unsigned long)iter->key; |
300 | if (iterk == key) | ||
301 | continue; | ||
284 | 302 | ||
303 | key = iterk; | ||
285 | if (__module_address(iter->key) == mod) { | 304 | if (__module_address(iter->key) == mod) { |
286 | atomic_set(&key->enabled, 0); | 305 | /* |
287 | key->entries = iter; | 306 | * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. |
307 | */ | ||
308 | *((unsigned long *)&key->entries) += (unsigned long)iter; | ||
288 | key->next = NULL; | 309 | key->next = NULL; |
289 | continue; | 310 | continue; |
290 | } | 311 | } |
291 | 312 | jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); | |
292 | jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); | ||
293 | if (!jlm) | 313 | if (!jlm) |
294 | return -ENOMEM; | 314 | return -ENOMEM; |
295 | |||
296 | jlm->mod = mod; | 315 | jlm->mod = mod; |
297 | jlm->entries = iter; | 316 | jlm->entries = iter; |
298 | jlm->next = key->next; | 317 | jlm->next = key->next; |
299 | key->next = jlm; | 318 | key->next = jlm; |
300 | 319 | ||
301 | if (jump_label_enabled(key)) | 320 | if (jump_label_type(key) == JUMP_LABEL_ENABLE) |
302 | __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); | 321 | __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); |
303 | } | 322 | } |
304 | 323 | ||
@@ -310,14 +329,14 @@ static void jump_label_del_module(struct module *mod) | |||
310 | struct jump_entry *iter_start = mod->jump_entries; | 329 | struct jump_entry *iter_start = mod->jump_entries; |
311 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; | 330 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
312 | struct jump_entry *iter; | 331 | struct jump_entry *iter; |
313 | struct jump_label_key *key = NULL; | 332 | struct static_key *key = NULL; |
314 | struct jump_label_mod *jlm, **prev; | 333 | struct static_key_mod *jlm, **prev; |
315 | 334 | ||
316 | for (iter = iter_start; iter < iter_stop; iter++) { | 335 | for (iter = iter_start; iter < iter_stop; iter++) { |
317 | if (iter->key == (jump_label_t)(unsigned long)key) | 336 | if (iter->key == (jump_label_t)(unsigned long)key) |
318 | continue; | 337 | continue; |
319 | 338 | ||
320 | key = (struct jump_label_key *)(unsigned long)iter->key; | 339 | key = (struct static_key *)(unsigned long)iter->key; |
321 | 340 | ||
322 | if (__module_address(iter->key) == mod) | 341 | if (__module_address(iter->key) == mod) |
323 | continue; | 342 | continue; |
@@ -419,9 +438,10 @@ int jump_label_text_reserved(void *start, void *end) | |||
419 | return ret; | 438 | return ret; |
420 | } | 439 | } |
421 | 440 | ||
422 | static void jump_label_update(struct jump_label_key *key, int enable) | 441 | static void jump_label_update(struct static_key *key, int enable) |
423 | { | 442 | { |
424 | struct jump_entry *entry = key->entries, *stop = __stop___jump_table; | 443 | struct jump_entry *stop = __stop___jump_table; |
444 | struct jump_entry *entry = jump_label_get_entries(key); | ||
425 | 445 | ||
426 | #ifdef CONFIG_MODULES | 446 | #ifdef CONFIG_MODULES |
427 | struct module *mod = __module_address((unsigned long)key); | 447 | struct module *mod = __module_address((unsigned long)key); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5255c9d2e053..112c6824476b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -162,13 +162,13 @@ static int sched_feat_show(struct seq_file *m, void *v) | |||
162 | 162 | ||
163 | #ifdef HAVE_JUMP_LABEL | 163 | #ifdef HAVE_JUMP_LABEL |
164 | 164 | ||
165 | #define jump_label_key__true jump_label_key_enabled | 165 | #define jump_label_key__true STATIC_KEY_INIT_TRUE |
166 | #define jump_label_key__false jump_label_key_disabled | 166 | #define jump_label_key__false STATIC_KEY_INIT_FALSE |
167 | 167 | ||
168 | #define SCHED_FEAT(name, enabled) \ | 168 | #define SCHED_FEAT(name, enabled) \ |
169 | jump_label_key__##enabled , | 169 | jump_label_key__##enabled , |
170 | 170 | ||
171 | struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = { | 171 | struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { |
172 | #include "features.h" | 172 | #include "features.h" |
173 | }; | 173 | }; |
174 | 174 | ||
@@ -176,14 +176,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = { | |||
176 | 176 | ||
177 | static void sched_feat_disable(int i) | 177 | static void sched_feat_disable(int i) |
178 | { | 178 | { |
179 | if (jump_label_enabled(&sched_feat_keys[i])) | 179 | if (static_key_enabled(&sched_feat_keys[i])) |
180 | jump_label_dec(&sched_feat_keys[i]); | 180 | static_key_slow_dec(&sched_feat_keys[i]); |
181 | } | 181 | } |
182 | 182 | ||
183 | static void sched_feat_enable(int i) | 183 | static void sched_feat_enable(int i) |
184 | { | 184 | { |
185 | if (!jump_label_enabled(&sched_feat_keys[i])) | 185 | if (!static_key_enabled(&sched_feat_keys[i])) |
186 | jump_label_inc(&sched_feat_keys[i]); | 186 | static_key_slow_inc(&sched_feat_keys[i]); |
187 | } | 187 | } |
188 | #else | 188 | #else |
189 | static void sched_feat_disable(int i) { }; | 189 | static void sched_feat_disable(int i) { }; |
@@ -894,7 +894,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) | |||
894 | delta -= irq_delta; | 894 | delta -= irq_delta; |
895 | #endif | 895 | #endif |
896 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING | 896 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING |
897 | if (static_branch((¶virt_steal_rq_enabled))) { | 897 | if (static_key_false((¶virt_steal_rq_enabled))) { |
898 | u64 st; | 898 | u64 st; |
899 | 899 | ||
900 | steal = paravirt_steal_clock(cpu_of(rq)); | 900 | steal = paravirt_steal_clock(cpu_of(rq)); |
@@ -2756,7 +2756,7 @@ void account_idle_time(cputime_t cputime) | |||
2756 | static __always_inline bool steal_account_process_tick(void) | 2756 | static __always_inline bool steal_account_process_tick(void) |
2757 | { | 2757 | { |
2758 | #ifdef CONFIG_PARAVIRT | 2758 | #ifdef CONFIG_PARAVIRT |
2759 | if (static_branch(¶virt_steal_enabled)) { | 2759 | if (static_key_false(¶virt_steal_enabled)) { |
2760 | u64 steal, st = 0; | 2760 | u64 steal, st = 0; |
2761 | 2761 | ||
2762 | steal = paravirt_steal_clock(smp_processor_id()); | 2762 | steal = paravirt_steal_clock(smp_processor_id()); |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7c6414fc669d..423547ada38a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1399,20 +1399,20 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | |||
1399 | #ifdef CONFIG_CFS_BANDWIDTH | 1399 | #ifdef CONFIG_CFS_BANDWIDTH |
1400 | 1400 | ||
1401 | #ifdef HAVE_JUMP_LABEL | 1401 | #ifdef HAVE_JUMP_LABEL |
1402 | static struct jump_label_key __cfs_bandwidth_used; | 1402 | static struct static_key __cfs_bandwidth_used; |
1403 | 1403 | ||
1404 | static inline bool cfs_bandwidth_used(void) | 1404 | static inline bool cfs_bandwidth_used(void) |
1405 | { | 1405 | { |
1406 | return static_branch(&__cfs_bandwidth_used); | 1406 | return static_key_false(&__cfs_bandwidth_used); |
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | void account_cfs_bandwidth_used(int enabled, int was_enabled) | 1409 | void account_cfs_bandwidth_used(int enabled, int was_enabled) |
1410 | { | 1410 | { |
1411 | /* only need to count groups transitioning between enabled/!enabled */ | 1411 | /* only need to count groups transitioning between enabled/!enabled */ |
1412 | if (enabled && !was_enabled) | 1412 | if (enabled && !was_enabled) |
1413 | jump_label_inc(&__cfs_bandwidth_used); | 1413 | static_key_slow_inc(&__cfs_bandwidth_used); |
1414 | else if (!enabled && was_enabled) | 1414 | else if (!enabled && was_enabled) |
1415 | jump_label_dec(&__cfs_bandwidth_used); | 1415 | static_key_slow_dec(&__cfs_bandwidth_used); |
1416 | } | 1416 | } |
1417 | #else /* HAVE_JUMP_LABEL */ | 1417 | #else /* HAVE_JUMP_LABEL */ |
1418 | static bool cfs_bandwidth_used(void) | 1418 | static bool cfs_bandwidth_used(void) |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 98c0c2623db8..b4cd6d8ea150 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -611,7 +611,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
611 | * Tunables that become constants when CONFIG_SCHED_DEBUG is off: | 611 | * Tunables that become constants when CONFIG_SCHED_DEBUG is off: |
612 | */ | 612 | */ |
613 | #ifdef CONFIG_SCHED_DEBUG | 613 | #ifdef CONFIG_SCHED_DEBUG |
614 | # include <linux/jump_label.h> | 614 | # include <linux/static_key.h> |
615 | # define const_debug __read_mostly | 615 | # define const_debug __read_mostly |
616 | #else | 616 | #else |
617 | # define const_debug const | 617 | # define const_debug const |
@@ -630,18 +630,18 @@ enum { | |||
630 | #undef SCHED_FEAT | 630 | #undef SCHED_FEAT |
631 | 631 | ||
632 | #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) | 632 | #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) |
633 | static __always_inline bool static_branch__true(struct jump_label_key *key) | 633 | static __always_inline bool static_branch__true(struct static_key *key) |
634 | { | 634 | { |
635 | return likely(static_branch(key)); /* Not out of line branch. */ | 635 | return static_key_true(key); /* Not out of line branch. */ |
636 | } | 636 | } |
637 | 637 | ||
638 | static __always_inline bool static_branch__false(struct jump_label_key *key) | 638 | static __always_inline bool static_branch__false(struct static_key *key) |
639 | { | 639 | { |
640 | return unlikely(static_branch(key)); /* Out of line branch. */ | 640 | return static_key_false(key); /* Out of line branch. */ |
641 | } | 641 | } |
642 | 642 | ||
643 | #define SCHED_FEAT(name, enabled) \ | 643 | #define SCHED_FEAT(name, enabled) \ |
644 | static __always_inline bool static_branch_##name(struct jump_label_key *key) \ | 644 | static __always_inline bool static_branch_##name(struct static_key *key) \ |
645 | { \ | 645 | { \ |
646 | return static_branch__##enabled(key); \ | 646 | return static_branch__##enabled(key); \ |
647 | } | 647 | } |
@@ -650,7 +650,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \ | |||
650 | 650 | ||
651 | #undef SCHED_FEAT | 651 | #undef SCHED_FEAT |
652 | 652 | ||
653 | extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR]; | 653 | extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; |
654 | #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) | 654 | #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) |
655 | #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ | 655 | #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ |
656 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) | 656 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index f1539decd99d..d96ba22dabfa 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/jump_label.h> | 28 | #include <linux/static_key.h> |
29 | 29 | ||
30 | extern struct tracepoint * const __start___tracepoints_ptrs[]; | 30 | extern struct tracepoint * const __start___tracepoints_ptrs[]; |
31 | extern struct tracepoint * const __stop___tracepoints_ptrs[]; | 31 | extern struct tracepoint * const __stop___tracepoints_ptrs[]; |
@@ -256,9 +256,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
256 | { | 256 | { |
257 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | 257 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); |
258 | 258 | ||
259 | if (elem->regfunc && !jump_label_enabled(&elem->key) && active) | 259 | if (elem->regfunc && !static_key_enabled(&elem->key) && active) |
260 | elem->regfunc(); | 260 | elem->regfunc(); |
261 | else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) | 261 | else if (elem->unregfunc && static_key_enabled(&elem->key) && !active) |
262 | elem->unregfunc(); | 262 | elem->unregfunc(); |
263 | 263 | ||
264 | /* | 264 | /* |
@@ -269,10 +269,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
269 | * is used. | 269 | * is used. |
270 | */ | 270 | */ |
271 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 271 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); |
272 | if (active && !jump_label_enabled(&elem->key)) | 272 | if (active && !static_key_enabled(&elem->key)) |
273 | jump_label_inc(&elem->key); | 273 | static_key_slow_inc(&elem->key); |
274 | else if (!active && jump_label_enabled(&elem->key)) | 274 | else if (!active && static_key_enabled(&elem->key)) |
275 | jump_label_dec(&elem->key); | 275 | static_key_slow_dec(&elem->key); |
276 | } | 276 | } |
277 | 277 | ||
278 | /* | 278 | /* |
@@ -283,11 +283,11 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
283 | */ | 283 | */ |
284 | static void disable_tracepoint(struct tracepoint *elem) | 284 | static void disable_tracepoint(struct tracepoint *elem) |
285 | { | 285 | { |
286 | if (elem->unregfunc && jump_label_enabled(&elem->key)) | 286 | if (elem->unregfunc && static_key_enabled(&elem->key)) |
287 | elem->unregfunc(); | 287 | elem->unregfunc(); |
288 | 288 | ||
289 | if (jump_label_enabled(&elem->key)) | 289 | if (static_key_enabled(&elem->key)) |
290 | jump_label_dec(&elem->key); | 290 | static_key_slow_dec(&elem->key); |
291 | rcu_assign_pointer(elem->funcs, NULL); | 291 | rcu_assign_pointer(elem->funcs, NULL); |
292 | } | 292 | } |
293 | 293 | ||