author      Ingo Molnar <mingo@elte.hu>  2012-02-24 02:31:31 -0500
committer   Ingo Molnar <mingo@elte.hu>  2012-02-24 04:05:59 -0500
commit      c5905afb0ee6550b42c49213da1c22d67316c194 (patch)
tree        253fdb322e6e5b257ffda3b9b66bce90a473a6f7
parent      1cfa60dc7d7c7cc774a44eee47ff135a644a1f31 (diff)
static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
So here's a boot-tested patch on top of Jason's series that does all the cleanups I talked about and turns jump labels into a more intuitive facility to use. It should also address the various misconceptions and confusions that surround jump labels.

Typical usage scenarios:

        #include <linux/static_key.h>

        struct static_key key = STATIC_KEY_INIT_TRUE;

        if (static_key_false(&key))
                do unlikely code
        else
                do likely code

Or:

        if (static_key_true(&key))
                do likely code
        else
                do unlikely code

The static key is modified via:

        static_key_slow_inc(&key);
        ...
        static_key_slow_dec(&key);

The 'slow' prefix makes it abundantly clear that this is an expensive operation.

I've updated all in-kernel code to use this everywhere. Note that I have (intentionally) not pushed the rename blindly through to the lowest levels: the actual arch-level jump-label patching facility should keep the jump-label name, since we want to decouple jump labels from the static-key facility a bit.

On non-jump-label enabled architectures static keys default to likely()/unlikely() branches.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: a.p.zijlstra@chello.nl
Cc: mathieu.desnoyers@efficios.com
Cc: davem@davemloft.net
Cc: ddaney.cavm@gmail.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
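To make the usage pattern above concrete, here is a minimal compilable sketch of the new API. Only the static_key names (STATIC_KEY_INIT_*, static_key_true()/static_key_false(), static_key_slow_inc()/static_key_slow_dec()) come from this patch; the my_feature_* identifiers are hypothetical. The sketch deliberately pairs STATIC_KEY_INIT_FALSE with static_key_false(), since the updated jump_label.h documentation below disallows mixing static_key_true() and static_key_false() on the same key:

        #include <linux/static_key.h>

        /* Hypothetical feature flag, biased to off: the true block is
         * placed out of line and the fast path falls through a nop. */
        static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

        static void my_hot_path(void)
        {
                if (static_key_false(&my_feature_key)) {
                        /* unlikely code, reached only once
                         * static_key_slow_inc() has patched the
                         * nop into a jump */
                } else {
                        /* likely code */
                }
        }

        /* Slow-path control, e.g. from a sysctl handler; each call may
         * rewrite every branch site, hence the deliberate 'slow' name. */
        static void my_feature_set(bool on)
        {
                if (on)
                        static_key_slow_inc(&my_feature_key);
                else
                        static_key_slow_dec(&my_feature_key);
        }

On architectures without jump-label support, the same code compiles to a plain atomic_read()-based conditional, as the !HAVE_JUMP_LABEL fallbacks in the include/linux/jump_label.h hunks below show.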
-rw-r--r--  arch/Kconfig                           29
-rw-r--r--  arch/ia64/include/asm/paravirt.h        6
-rw-r--r--  arch/ia64/kernel/paravirt.c             4
-rw-r--r--  arch/mips/include/asm/jump_label.h      2
-rw-r--r--  arch/powerpc/include/asm/jump_label.h   2
-rw-r--r--  arch/s390/include/asm/jump_label.h      2
-rw-r--r--  arch/sparc/include/asm/jump_label.h     2
-rw-r--r--  arch/x86/include/asm/jump_label.h       6
-rw-r--r--  arch/x86/include/asm/paravirt.h         6
-rw-r--r--  arch/x86/kernel/kvm.c                   4
-rw-r--r--  arch/x86/kernel/paravirt.c              4
-rw-r--r--  arch/x86/kvm/mmu_audit.c                8
-rw-r--r--  include/linux/jump_label.h            139
-rw-r--r--  include/linux/netdevice.h               4
-rw-r--r--  include/linux/netfilter.h               6
-rw-r--r--  include/linux/perf_event.h             12
-rw-r--r--  include/linux/static_key.h              1
-rw-r--r--  include/linux/tracepoint.h              8
-rw-r--r--  include/net/sock.h                      6
-rw-r--r--  kernel/events/core.c                   16
-rw-r--r--  kernel/jump_label.c                   128
-rw-r--r--  kernel/sched/core.c                    18
-rw-r--r--  kernel/sched/fair.c                     8
-rw-r--r--  kernel/sched/sched.h                   14
-rw-r--r--  kernel/tracepoint.c                    20
-rw-r--r--  net/core/dev.c                         24
-rw-r--r--  net/core/net-sysfs.c                    4
-rw-r--r--  net/core/sock.c                         4
-rw-r--r--  net/core/sysctl_net_core.c              4
-rw-r--r--  net/ipv4/tcp_memcontrol.c               6
-rw-r--r--  net/netfilter/core.c                    6
31 files changed, 298 insertions, 205 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 4f55c736be11..5b448a74d0f7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -47,18 +47,29 @@ config KPROBES
 	  If in doubt, say "N".
 
 config JUMP_LABEL
-	bool "Optimize trace point call sites"
+	bool "Optimize very unlikely/likely branches"
 	depends on HAVE_ARCH_JUMP_LABEL
 	help
+	 This option enables a transparent branch optimization that
+	 makes certain almost-always-true or almost-always-false branch
+	 conditions even cheaper to execute within the kernel.
+
+	 Certain performance-sensitive kernel code, such as trace points,
+	 scheduler functionality, networking code and KVM have such
+	 branches and include support for this optimization technique.
+
 	 If it is detected that the compiler has support for "asm goto",
-	 the kernel will compile trace point locations with just a
-	 nop instruction. When trace points are enabled, the nop will
-	 be converted to a jump to the trace function. This technique
-	 lowers overhead and stress on the branch prediction of the
-	 processor.
-
-	 On i386, options added to the compiler flags may increase
-	 the size of the kernel slightly.
+	 the kernel will compile such branches with just a nop
+	 instruction. When the condition flag is toggled to true, the
+	 nop will be converted to a jump instruction to execute the
+	 conditional block of instructions.
+
+	 This technique lowers overhead and stress on the branch prediction
+	 of the processor and generally makes the kernel faster. The update
+	 of the condition is slower, but those are always very rare.
+
+	 ( On 32-bit x86, the necessary options added to the compiler
+	   flags may increase the size of the kernel slightly. )
 
 config OPTPROBES
 	def_bool y
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 32551d304cd7..b149b88ea795 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -281,9 +281,9 @@ paravirt_init_missing_ticks_accounting(int cpu)
 	pv_time_ops.init_missing_ticks_accounting(cpu);
 }
 
-struct jump_label_key;
-extern struct jump_label_key paravirt_steal_enabled;
-extern struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
 
 static inline int
 paravirt_do_steal_accounting(unsigned long *new_itm)
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index 100868216c55..1b22f6de2932 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -634,8 +634,8 @@ struct pv_irq_ops pv_irq_ops = {
  * pv_time_ops
  * time operations
  */
-struct jump_label_key paravirt_steal_enabled;
-struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
 
 static int
 ia64_native_do_steal_accounting(unsigned long *new_itm)
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 1881b316ca45..4d6d77ed9b9d 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,7 +20,7 @@
 #define WORD_INSN ".word"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:\tnop\n\t"
 		"nop\n\t"
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 938986e412f1..ae098c438f00 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -17,7 +17,7 @@
 #define JUMP_ENTRY_TYPE		stringify_in_c(FTR_ENTRY_LONG)
 #define JUMP_LABEL_NOP_SIZE	4
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:\n\t"
 		 "nop\n\t"
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 95a6cf2b5b67..6c32190dc73e 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -13,7 +13,7 @@
 #define ASM_ALIGN ".balign 4"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("0:	brcl 0,0\n"
 		".pushsection __jump_table, \"aw\"\n"
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index fc73a82366f8..5080d16a832f 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,7 +7,7 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:\n\t"
 		 "nop\n\t"
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a32b18ce6ead..3a16c1483b45 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -9,12 +9,12 @@
 
 #define JUMP_LABEL_NOP_SIZE 5
 
-#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:"
-		JUMP_LABEL_INITIAL_NOP
+		STATIC_KEY_INITIAL_NOP
 		".pushsection __jump_table,  \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
 		_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index a7d2db9a74fb..c0180fd372d2 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -230,9 +230,9 @@ static inline unsigned long long paravirt_sched_clock(void)
 	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
 
-struct jump_label_key;
-extern struct jump_label_key paravirt_steal_enabled;
-extern struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
 
 static inline u64 paravirt_steal_clock(int cpu)
 {
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index f0c6fd6f176b..694d801bf606 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -438,9 +438,9 @@ void __init kvm_guest_init(void)
 static __init int activate_jump_labels(void)
 {
 	if (has_steal_clock) {
-		jump_label_inc(&paravirt_steal_enabled);
+		static_key_slow_inc(&paravirt_steal_enabled);
 		if (steal_acc)
-			jump_label_inc(&paravirt_steal_rq_enabled);
+			static_key_slow_inc(&paravirt_steal_rq_enabled);
 	}
 
 	return 0;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index d90272e6bc40..ada2f99388dd 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -202,8 +202,8 @@ static void native_flush_tlb_single(unsigned long addr)
 	__native_flush_tlb_single(addr);
 }
 
-struct jump_label_key paravirt_steal_enabled;
-struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
 
 static u64 native_steal_clock(int cpu)
 {
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index fe15dcc07a6b..ea7b4fd34676 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
 }
 
 static bool mmu_audit;
-static struct jump_label_key mmu_audit_key;
+static struct static_key mmu_audit_key;
 
 static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
@@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 
 static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
-	if (static_branch((&mmu_audit_key)))
+	if (static_key_false((&mmu_audit_key)))
 		__kvm_mmu_audit(vcpu, point);
 }
 
@@ -259,7 +259,7 @@ static void mmu_audit_enable(void)
 	if (mmu_audit)
 		return;
 
-	jump_label_inc(&mmu_audit_key);
+	static_key_slow_inc(&mmu_audit_key);
 	mmu_audit = true;
 }
 
@@ -268,7 +268,7 @@ static void mmu_audit_disable(void)
 	if (!mmu_audit)
 		return;
 
-	jump_label_dec(&mmu_audit_key);
+	static_key_slow_dec(&mmu_audit_key);
 	mmu_audit = false;
 }
 
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f7c69580fea7..2172da2d9bb4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -9,15 +9,15 @@
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support the result
- * of a "if (static_branch(&key))" statement is a unconditional branch (which
+ * of a "if (static_key_false(&key))" statement is a unconditional branch (which
  * defaults to false - and the true block is placed out of line).
  *
  * However at runtime we can change the branch target using
- * jump_label_{inc,dec}(). These function as a 'reference' count on the key
+ * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
  * object and for as long as there are references all branches referring to
  * that particular key will point to the (out of line) true block.
  *
- * Since this relies on modifying code the jump_label_{inc,dec}() functions
+ * Since this relies on modifying code the static_key_slow_{inc,dec}() functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
  * OTOH, since the affected branches are unconditional their runtime overhead
  * will be absolutely minimal, esp. in the default (off) case where the total
@@ -26,12 +26,26 @@
  *
  * When the control is directly exposed to userspace it is prudent to delay the
  * decrement to avoid high frequency code modifications which can (and do)
- * cause significant performance degradation. Struct jump_label_key_deferred and
- * jump_label_dec_deferred() provide for this.
+ * cause significant performance degradation. Struct static_key_deferred and
+ * static_key_slow_dec_deferred() provide for this.
  *
  * Lacking toolchain and or architecture support, it falls back to a simple
  * conditional branch.
- */
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ *   if (static_key_true(&my_key)) {
+ *   }
+ *
+ * will result in the true case being in-line and starts the key with a single
+ * reference. Mixing static_key_true() and static_key_false() on the same key is not
+ * allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is the
+ * same as using STATIC_KEY_INIT_FALSE and static_key_false() is
+ * equivalent with static_branch().
+ *
+*/
 
 #include <linux/types.h>
 #include <linux/compiler.h>
@@ -39,16 +53,17 @@
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 ot */
 	struct jump_entry *entries;
 #ifdef CONFIG_MODULES
-	struct jump_label_mod *next;
+	struct static_key_mod *next;
 #endif
 };
 
-struct jump_label_key_deferred {
-	struct jump_label_key key;
+struct static_key_deferred {
+	struct static_key key;
 	unsigned long timeout;
 	struct delayed_work work;
 };
@@ -66,13 +81,34 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#ifdef CONFIG_MODULES
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL}
-#else
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL}
-#endif
+#define JUMP_LABEL_TRUE_BRANCH 1UL
+
+static
+inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries
+						& ~JUMP_LABEL_TRUE_BRANCH);
+}
+
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+	if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	return arch_static_branch(key);
+}
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_true(struct static_key *key)
+{
+	return !static_key_false(key);
+}
+
+/* Deprecated. Please use 'static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
 {
 	return arch_static_branch(key);
 }
@@ -88,21 +124,24 @@ extern void arch_jump_label_transform(struct jump_entry *entry,
 extern void arch_jump_label_transform_static(struct jump_entry *entry,
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
-extern void jump_label_inc(struct jump_label_key *key);
-extern void jump_label_dec(struct jump_label_key *key);
-extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
-extern bool jump_label_enabled(struct jump_label_key *key);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern bool static_key_enabled(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
-extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
-		unsigned long rl);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
 
 #else  /* !HAVE_JUMP_LABEL */
 
 #include <linux/atomic.h>
 
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
-
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
 };
 
@@ -110,30 +149,45 @@ static __always_inline void jump_label_init(void)
 {
 }
 
-struct jump_label_key_deferred {
-	struct jump_label_key key;
+struct static_key_deferred {
+	struct static_key key;
 };
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled)) > 0)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
 {
-	if (unlikely(atomic_read(&key->enabled)))
+	if (likely(atomic_read(&key->enabled)) > 0)
 		return true;
 	return false;
 }
 
-static inline void jump_label_inc(struct jump_label_key *key)
+/* Deprecated. Please use 'static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled)) > 0)
+		return true;
+	return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
 {
 	atomic_inc(&key->enabled);
 }
 
-static inline void jump_label_dec(struct jump_label_key *key)
+static inline void static_key_slow_dec(struct static_key *key)
 {
 	atomic_dec(&key->enabled);
 }
 
-static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-	jump_label_dec(&key->key);
+	static_key_slow_dec(&key->key);
 }
 
 static inline int jump_label_text_reserved(void *start, void *end)
@@ -144,9 +198,9 @@ static inline int jump_label_text_reserved(void *start, void *end)
 static inline void jump_label_lock(void) {}
 static inline void jump_label_unlock(void) {}
 
-static inline bool jump_label_enabled(struct jump_label_key *key)
+static inline bool static_key_enabled(struct static_key *key)
 {
-	return !!atomic_read(&key->enabled);
+	return (atomic_read(&key->enabled) > 0);
 }
 
 static inline int jump_label_apply_nops(struct module *mod)
@@ -154,13 +208,20 @@ static inline int jump_label_apply_nops(struct module *mod)
 	return 0;
 }
 
-static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
 }
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+		{ .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+		{ .enabled = ATOMIC_INIT(0) })
+
 #endif	/* HAVE_JUMP_LABEL */
 
-#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
-#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
 
 #endif	/* _LINUX_JUMP_LABEL_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0eac07c95255..7dfaae7846ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -214,8 +214,8 @@ enum {
 #include <linux/skbuff.h>
 
 #ifdef CONFIG_RPS
-#include <linux/jump_label.h>
-extern struct jump_label_key rps_needed;
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
 #endif
 
 struct neighbour;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b809265607d0..29734be334c1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -163,13 +163,13 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
 #if defined(CONFIG_JUMP_LABEL)
-#include <linux/jump_label.h>
-extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#include <linux/static_key.h>
+extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 {
 	if (__builtin_constant_p(pf) &&
 	    __builtin_constant_p(hook))
-		return static_branch(&nf_hooks_needed[pf][hook]);
+		return static_key_false(&nf_hooks_needed[pf][hook]);
 
 	return !list_empty(&nf_hooks[pf][hook]);
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 412b790f5da6..0d21e6f1cf53 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -514,7 +514,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/atomic.h>
 #include <asm/local.h>
 
@@ -1038,7 +1038,7 @@ static inline int is_software_event(struct perf_event *event)
 	return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
@@ -1066,7 +1066,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
 	struct pt_regs hot_regs;
 
-	if (static_branch(&perf_swevent_enabled[event_id])) {
+	if (static_key_false(&perf_swevent_enabled[event_id])) {
 		if (!regs) {
 			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
@@ -1075,12 +1075,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 	}
 }
 
-extern struct jump_label_key_deferred perf_sched_events;
+extern struct static_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
 }
 
@@ -1089,7 +1089,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
 }
 
diff --git a/include/linux/static_key.h b/include/linux/static_key.h
new file mode 100644
index 000000000000..27bd3f8a0857
--- /dev/null
+++ b/include/linux/static_key.h
@@ -0,0 +1 @@
+#include <linux/jump_label.h>
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index fc36da97ff7e..bd96ecd0e05c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 struct module;
 struct tracepoint;
@@ -29,7 +29,7 @@ struct tracepoint_func {
 
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
-	struct jump_label_key key;
+	struct static_key key;
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
 	struct tracepoint_func __rcu *funcs;
@@ -145,7 +145,7 @@ static inline void tracepoint_synchronize_unregister(void)
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
-		if (static_branch(&__tracepoint_##name.key))		\
+		if (static_key_false(&__tracepoint_##name.key))		\
 			__DO_TRACE(&__tracepoint_##name,		\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
@@ -188,7 +188,7 @@ static inline void tracepoint_synchronize_unregister(void)
 	__attribute__((section("__tracepoints_strings"))) = #name;	 \
 	struct tracepoint __tracepoint_##name				 \
 	__attribute__((section("__tracepoints"))) =			 \
-		{ __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
+		{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
 	static struct tracepoint * const __tracepoint_ptr_##name __used \
 	__attribute__((section("__tracepoints_ptrs"))) =		 \
 	&__tracepoint_##name;
diff --git a/include/net/sock.h b/include/net/sock.h
index 91c1c8baf020..dcde2d9268cd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -55,7 +55,7 @@
 #include <linux/uaccess.h>
 #include <linux/memcontrol.h>
 #include <linux/res_counter.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -924,13 +924,13 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
 #endif /* SOCK_REFCNT_DEBUG */
 
 #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
-extern struct jump_label_key memcg_socket_limit_enabled;
+extern struct static_key memcg_socket_limit_enabled;
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
 					       struct cg_proto *cg_proto)
 {
 	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
 }
-#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7c3b9de55f6b..5e0f8bb89b2b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -128,7 +128,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct jump_label_key_deferred perf_sched_events __read_mostly;
+struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -2769,7 +2769,7 @@ static void free_event(struct perf_event *event)
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_dec_deferred(&perf_sched_events);
+			static_key_slow_dec_deferred(&perf_sched_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -2780,7 +2780,7 @@ static void free_event(struct perf_event *event)
 		put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			jump_label_dec_deferred(&perf_sched_events);
+			static_key_slow_dec_deferred(&perf_sched_events);
 		}
 	}
 
@@ -4982,7 +4982,7 @@ fail:
 	return err;
 }
 
-struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
 {
@@ -4990,7 +4990,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 
 	WARN_ON(event->parent);
 
-	jump_label_dec(&perf_swevent_enabled[event_id]);
+	static_key_slow_dec(&perf_swevent_enabled[event_id]);
 	swevent_hlist_put(event);
 }
 
@@ -5020,7 +5020,7 @@ static int perf_swevent_init(struct perf_event *event)
 		if (err)
 			return err;
 
-		jump_label_inc(&perf_swevent_enabled[event_id]);
+		static_key_slow_inc(&perf_swevent_enabled[event_id]);
 		event->destroy = sw_perf_event_destroy;
 	}
 
@@ -5843,7 +5843,7 @@ done:
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_inc(&perf_sched_events.key);
+			static_key_slow_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -6081,7 +6081,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * - that may need work on context switch
 		 */
 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		jump_label_inc(&perf_sched_events.key);
+		static_key_slow_inc(&perf_sched_events.key);
 	}
 
 	/*
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 543782e7cdd2..bf9dcadbb53a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <linux/err.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -29,10 +29,11 @@ void jump_label_unlock(void)
 	mutex_unlock(&jump_label_mutex);
 }
 
-bool jump_label_enabled(struct jump_label_key *key)
+bool static_key_enabled(struct static_key *key)
 {
-	return !!atomic_read(&key->enabled);
+	return (atomic_read(&key->enabled) > 0);
 }
+EXPORT_SYMBOL_GPL(static_key_enabled);
 
 static int jump_label_cmp(const void *a, const void *b)
 {
@@ -58,22 +59,26 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }
 
-static void jump_label_update(struct jump_label_key *key, int enable);
+static void jump_label_update(struct static_key *key, int enable);
 
-void jump_label_inc(struct jump_label_key *key)
+void static_key_slow_inc(struct static_key *key)
 {
 	if (atomic_inc_not_zero(&key->enabled))
 		return;
 
 	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0)
-		jump_label_update(key, JUMP_LABEL_ENABLE);
+	if (atomic_read(&key->enabled) == 0) {
+		if (!jump_label_get_branch_default(key))
+			jump_label_update(key, JUMP_LABEL_ENABLE);
+		else
+			jump_label_update(key, JUMP_LABEL_DISABLE);
+	}
 	atomic_inc(&key->enabled);
 	jump_label_unlock();
 }
-EXPORT_SYMBOL_GPL(jump_label_inc);
+EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
-static void __jump_label_dec(struct jump_label_key *key,
+static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
@@ -85,32 +90,35 @@ static void __jump_label_dec(struct jump_label_key *key,
 	if (rate_limit) {
 		atomic_inc(&key->enabled);
 		schedule_delayed_work(work, rate_limit);
-	} else
-		jump_label_update(key, JUMP_LABEL_DISABLE);
-
+	} else {
+		if (!jump_label_get_branch_default(key))
+			jump_label_update(key, JUMP_LABEL_DISABLE);
+		else
+			jump_label_update(key, JUMP_LABEL_ENABLE);
+	}
 	jump_label_unlock();
 }
-EXPORT_SYMBOL_GPL(jump_label_dec);
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
-	struct jump_label_key_deferred *key =
-		container_of(work, struct jump_label_key_deferred, work.work);
-	__jump_label_dec(&key->key, 0, NULL);
+	struct static_key_deferred *key =
+		container_of(work, struct static_key_deferred, work.work);
+	__static_key_slow_dec(&key->key, 0, NULL);
 }
 
-void jump_label_dec(struct jump_label_key *key)
+void static_key_slow_dec(struct static_key *key)
 {
-	__jump_label_dec(key, 0, NULL);
+	__static_key_slow_dec(key, 0, NULL);
 }
+EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
-void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-	__jump_label_dec(&key->key, key->timeout, &key->work);
+	__static_key_slow_dec(&key->key, key->timeout, &key->work);
 }
+EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
-
-void jump_label_rate_limit(struct jump_label_key_deferred *key,
+void jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
 	key->timeout = rl;
@@ -153,7 +161,7 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
 	arch_jump_label_transform(entry, type);
 }
 
-static void __jump_label_update(struct jump_label_key *key,
+static void __jump_label_update(struct static_key *key,
 				struct jump_entry *entry,
 				struct jump_entry *stop, int enable)
 {
@@ -170,27 +178,40 @@ static void __jump_label_update(struct jump_label_key *key,
 	}
 }
 
+static enum jump_label_type jump_label_type(struct static_key *key)
+{
+	bool true_branch = jump_label_get_branch_default(key);
+	bool state = static_key_enabled(key);
+
+	if ((!true_branch && state) || (true_branch && !state))
+		return JUMP_LABEL_ENABLE;
+
+	return JUMP_LABEL_DISABLE;
+}
+
 void __init jump_label_init(void)
 {
 	struct jump_entry *iter_start = __start___jump_table;
 	struct jump_entry *iter_stop = __stop___jump_table;
-	struct jump_label_key *key = NULL;
+	struct static_key *key = NULL;
 	struct jump_entry *iter;
 
 	jump_label_lock();
 	jump_label_sort_entries(iter_start, iter_stop);
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		struct jump_label_key *iterk;
+		struct static_key *iterk;
 
-		iterk = (struct jump_label_key *)(unsigned long)iter->key;
-		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
-						 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+		iterk = (struct static_key *)(unsigned long)iter->key;
+		arch_jump_label_transform_static(iter, jump_label_type(iterk));
 		if (iterk == key)
 			continue;
 
 		key = iterk;
-		key->entries = iter;
+		/*
+		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
+		 */
+		*((unsigned long *)&key->entries) += (unsigned long)iter;
 #ifdef CONFIG_MODULES
 		key->next = NULL;
 #endif
@@ -200,8 +221,8 @@ void __init jump_label_init(void)
 
 #ifdef CONFIG_MODULES
 
-struct jump_label_mod {
-	struct jump_label_mod *next;
+struct static_key_mod {
+	struct static_key_mod *next;
 	struct jump_entry *entries;
 	struct module *mod;
 };
@@ -221,9 +242,9 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
 				start, end);
 }
 
-static void __jump_label_mod_update(struct jump_label_key *key, int enable)
+static void __jump_label_mod_update(struct static_key *key, int enable)
 {
-	struct jump_label_mod *mod = key->next;
+	struct static_key_mod *mod = key->next;
 
 	while (mod) {
 		struct module *m = mod->mod;
@@ -254,11 +275,7 @@ void jump_label_apply_nops(struct module *mod)
 		return;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		struct jump_label_key *iterk;
-
-		iterk = (struct jump_label_key *)(unsigned long)iter->key;
-		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
-						 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
 	}
 }
 
264 281
@@ -267,8 +284,8 @@ static int jump_label_add_module(struct module *mod)
267 struct jump_entry *iter_start = mod->jump_entries; 284 struct jump_entry *iter_start = mod->jump_entries;
268 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; 285 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
269 struct jump_entry *iter; 286 struct jump_entry *iter;
270 struct jump_label_key *key = NULL; 287 struct static_key *key = NULL;
271 struct jump_label_mod *jlm; 288 struct static_key_mod *jlm;
272 289
273 /* if the module doesn't have jump label entries, just return */ 290 /* if the module doesn't have jump label entries, just return */
274 if (iter_start == iter_stop) 291 if (iter_start == iter_stop)
@@ -277,28 +294,30 @@ static int jump_label_add_module(struct module *mod)
 	jump_label_sort_entries(iter_start, iter_stop);
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		if (iter->key == (jump_label_t)(unsigned long)key)
-			continue;
+		struct static_key *iterk;
 
-		key = (struct jump_label_key *)(unsigned long)iter->key;
+		iterk = (struct static_key *)(unsigned long)iter->key;
+		if (iterk == key)
+			continue;
 
+		key = iterk;
 		if (__module_address(iter->key) == mod) {
-			atomic_set(&key->enabled, 0);
-			key->entries = iter;
+			/*
+			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
+			 */
+			*((unsigned long *)&key->entries) += (unsigned long)iter;
 			key->next = NULL;
 			continue;
 		}
-
-		jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
+		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
 		if (!jlm)
 			return -ENOMEM;
-
 		jlm->mod = mod;
 		jlm->entries = iter;
 		jlm->next = key->next;
 		key->next = jlm;
 
-		if (jump_label_enabled(key))
+		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
 			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
 	}
 
@@ -310,14 +329,14 @@ static void jump_label_del_module(struct module *mod)
 	struct jump_entry *iter_start = mod->jump_entries;
 	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
 	struct jump_entry *iter;
-	struct jump_label_key *key = NULL;
-	struct jump_label_mod *jlm, **prev;
+	struct static_key *key = NULL;
+	struct static_key_mod *jlm, **prev;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		if (iter->key == (jump_label_t)(unsigned long)key)
 			continue;
 
-		key = (struct jump_label_key *)(unsigned long)iter->key;
+		key = (struct static_key *)(unsigned long)iter->key;
 
 		if (__module_address(iter->key) == mod)
 			continue;
@@ -419,9 +438,10 @@ int jump_label_text_reserved(void *start, void *end)
 	return ret;
 }
 
-static void jump_label_update(struct jump_label_key *key, int enable)
+static void jump_label_update(struct static_key *key, int enable)
 {
-	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
+	struct jump_entry *stop = __stop___jump_table;
+	struct jump_entry *entry = jump_label_get_entries(key);
 
 #ifdef CONFIG_MODULES
 	struct module *mod = __module_address((unsigned long)key);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5255c9d2e053..112c6824476b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -162,13 +162,13 @@ static int sched_feat_show(struct seq_file *m, void *v)
 
 #ifdef HAVE_JUMP_LABEL
 
-#define jump_label_key__true  jump_label_key_enabled
-#define jump_label_key__false jump_label_key_disabled
+#define jump_label_key__true  STATIC_KEY_INIT_TRUE
+#define jump_label_key__false STATIC_KEY_INIT_FALSE
 
 #define SCHED_FEAT(name, enabled)	\
 	jump_label_key__##enabled ,
 
-struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 #include "features.h"
 };
 
@@ -176,14 +176,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-	if (jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_dec(&sched_feat_keys[i]);
+	if (static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_dec(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	if (!jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_inc(&sched_feat_keys[i]);
+	if (!static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_inc(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -894,7 +894,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	delta -= irq_delta;
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-	if (static_branch((&paravirt_steal_rq_enabled))) {
+	if (static_key_false((&paravirt_steal_rq_enabled))) {
 		u64 st;
 
 		steal = paravirt_steal_clock(cpu_of(rq));
@@ -2756,7 +2756,7 @@ void account_idle_time(cputime_t cputime)
 static __always_inline bool steal_account_process_tick(void)
 {
 #ifdef CONFIG_PARAVIRT
-	if (static_branch(&paravirt_steal_enabled)) {
+	if (static_key_false(&paravirt_steal_enabled)) {
 		u64 steal, st = 0;
 
 		steal = paravirt_steal_clock(smp_processor_id());
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c6414fc669d..423547ada38a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1399,20 +1399,20 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_CFS_BANDWIDTH
 
 #ifdef HAVE_JUMP_LABEL
-static struct jump_label_key __cfs_bandwidth_used;
+static struct static_key __cfs_bandwidth_used;
 
 static inline bool cfs_bandwidth_used(void)
 {
-	return static_branch(&__cfs_bandwidth_used);
+	return static_key_false(&__cfs_bandwidth_used);
 }
 
 void account_cfs_bandwidth_used(int enabled, int was_enabled)
 {
 	/* only need to count groups transitioning between enabled/!enabled */
 	if (enabled && !was_enabled)
-		jump_label_inc(&__cfs_bandwidth_used);
+		static_key_slow_inc(&__cfs_bandwidth_used);
 	else if (!enabled && was_enabled)
-		jump_label_dec(&__cfs_bandwidth_used);
+		static_key_slow_dec(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 98c0c2623db8..b4cd6d8ea150 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -611,7 +611,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
 #ifdef CONFIG_SCHED_DEBUG
-# include <linux/jump_label.h>
+# include <linux/static_key.h>
 # define const_debug __read_mostly
 #else
 # define const_debug const
@@ -630,18 +630,18 @@ enum {
 #undef SCHED_FEAT
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
-static __always_inline bool static_branch__true(struct jump_label_key *key)
+static __always_inline bool static_branch__true(struct static_key *key)
 {
-	return likely(static_branch(key)); /* Not out of line branch. */
+	return static_key_true(key); /* Not out of line branch. */
 }
 
-static __always_inline bool static_branch__false(struct jump_label_key *key)
+static __always_inline bool static_branch__false(struct static_key *key)
 {
-	return unlikely(static_branch(key)); /* Out of line branch. */
+	return static_key_false(key); /* Out of line branch. */
 }
 
 #define SCHED_FEAT(name, enabled)					\
-static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+static __always_inline bool static_branch_##name(struct static_key *key) \
 {									\
 	return static_branch__##enabled(key);				\
 }
@@ -650,7 +650,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \
 
 #undef SCHED_FEAT
 
-extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index f1539decd99d..d96ba22dabfa 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -25,7 +25,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 extern struct tracepoint * const __start___tracepoints_ptrs[];
 extern struct tracepoint * const __stop___tracepoints_ptrs[];
@@ -256,9 +256,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 {
 	WARN_ON(strcmp((*entry)->name, elem->name) != 0);
 
-	if (elem->regfunc && !jump_label_enabled(&elem->key) && active)
+	if (elem->regfunc && !static_key_enabled(&elem->key) && active)
 		elem->regfunc();
-	else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active)
+	else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
 		elem->unregfunc();
 
 	/*
@@ -269,10 +269,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 	 * is used.
 	 */
 	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-	if (active && !jump_label_enabled(&elem->key))
-		jump_label_inc(&elem->key);
-	else if (!active && jump_label_enabled(&elem->key))
-		jump_label_dec(&elem->key);
+	if (active && !static_key_enabled(&elem->key))
+		static_key_slow_inc(&elem->key);
+	else if (!active && static_key_enabled(&elem->key))
+		static_key_slow_dec(&elem->key);
 }
 
 /*
@@ -283,11 +283,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
283 */ 283 */
284static void disable_tracepoint(struct tracepoint *elem) 284static void disable_tracepoint(struct tracepoint *elem)
285{ 285{
286 if (elem->unregfunc && jump_label_enabled(&elem->key)) 286 if (elem->unregfunc && static_key_enabled(&elem->key))
287 elem->unregfunc(); 287 elem->unregfunc();
288 288
289 if (jump_label_enabled(&elem->key)) 289 if (static_key_enabled(&elem->key))
290 jump_label_dec(&elem->key); 290 static_key_slow_dec(&elem->key);
291 rcu_assign_pointer(elem->funcs, NULL); 291 rcu_assign_pointer(elem->funcs, NULL);
292} 292}
293 293
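
The tracepoint changes above preserve the existing idempotent toggle: the
key is only bumped or dropped on a real state transition, so the inc/dec
pair can never get unbalanced however often (de)activation is retried. The
same pattern in isolation, with a made-up key and helper name:

  #include <linux/static_key.h>

  static struct static_key my_key = STATIC_KEY_INIT_FALSE;

  /* hypothetical helper, mirroring set_tracepoint()'s key handling */
  static void my_set_active(bool active)
  {
      if (active && !static_key_enabled(&my_key))
          static_key_slow_inc(&my_key);
      else if (!active && static_key_enabled(&my_key))
          static_key_slow_dec(&my_key);
  }
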
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..da7ce7f0e566 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -134,7 +134,7 @@
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/net_tstamp.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <net/flow_keys.h>
 
 #include "net-sysfs.h"
@@ -1441,11 +1441,11 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-static struct jump_label_key netstamp_needed __read_mostly;
+static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call jump_label_dec() from irq context
+/* We are not allowed to call static_key_slow_dec() from irq context
  * If net_disable_timestamp() is called from irq context, defer the
- * jump_label_dec() calls.
+ * static_key_slow_dec() calls.
  */
 static atomic_t netstamp_needed_deferred;
 #endif
@@ -1457,12 +1457,12 @@ void net_enable_timestamp(void)
 
 	if (deferred) {
 		while (--deferred)
-			jump_label_dec(&netstamp_needed);
+			static_key_slow_dec(&netstamp_needed);
 		return;
 	}
 #endif
 	WARN_ON(in_interrupt());
-	jump_label_inc(&netstamp_needed);
+	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
@@ -1474,19 +1474,19 @@ void net_disable_timestamp(void)
 		return;
 	}
 #endif
-	jump_label_dec(&netstamp_needed);
+	static_key_slow_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp.tv64 = 0;
-	if (static_branch(&netstamp_needed))
+	if (static_key_false(&netstamp_needed))
 		__net_timestamp(skb);
 }
 
 #define net_timestamp_check(COND, SKB) \
-	if (static_branch(&netstamp_needed)) { \
+	if (static_key_false(&netstamp_needed)) { \
 		if ((COND) && !(SKB)->tstamp.tv64) \
 			__net_timestamp(SKB); \
 	} \
@@ -2660,7 +2660,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
-struct jump_label_key rps_needed __read_mostly;
+struct static_key rps_needed __read_mostly;
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -2945,7 +2945,7 @@ int netif_rx(struct sk_buff *skb)
 
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed)) {
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -3309,7 +3309,7 @@ int netif_receive_skb(struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed)) {
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a1727cda03d7..495586232aa1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -608,10 +608,10 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	spin_unlock(&rps_map_lock);
 
 	if (map)
-		jump_label_inc(&rps_needed);
+		static_key_slow_inc(&rps_needed);
 	if (old_map) {
 		kfree_rcu(old_map, rcu);
-		jump_label_dec(&rps_needed);
+		static_key_slow_dec(&rps_needed);
 	}
 	free_cpumask_var(mask);
 	return len;
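
Note the pairing discipline in store_rps_map(): one inc per map installed,
one dec per map retired, so rps_needed effectively counts live maps and
the static branches in netif_rx()/netif_receive_skb() only stay patched in
while at least one rx queue has a map. The same reference-counting idiom
as a generic sketch (types, names and the lock are invented for the
example):

  struct table {
      struct rcu_head rcu;
      /* ... payload ... */
  };

  /* hypothetical: swap in a new table; my_key's enable count tracks
   * the number of live tables */
  static void install_table(struct table __rcu **slot, struct table *new)
  {
      struct table *old = rcu_dereference_protected(*slot,
                              lockdep_is_held(&slot_lock));

      rcu_assign_pointer(*slot, new);
      if (new)
          static_key_slow_inc(&my_key);
      if (old) {
          static_key_slow_dec(&my_key);
          kfree_rcu(old, rcu);
      }
  }
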
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..3a4e5817a2a7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -111,7 +111,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/memcontrol.h>
 
 #include <asm/uaccess.h>
@@ -184,7 +184,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-struct jump_label_key memcg_socket_limit_enabled;
+struct static_key memcg_socket_limit_enabled;
 EXPORT_SYMBOL(memcg_socket_limit_enabled);
 
 /*
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d05559d4d9cd..0c2850874254 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -69,9 +69,9 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 	if (sock_table != orig_sock_table) {
 		rcu_assign_pointer(rps_sock_flow_table, sock_table);
 		if (sock_table)
-			jump_label_inc(&rps_needed);
+			static_key_slow_inc(&rps_needed);
 		if (orig_sock_table) {
-			jump_label_dec(&rps_needed);
+			static_key_slow_dec(&rps_needed);
 			synchronize_rcu();
 			vfree(orig_sock_table);
 		}
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 49978788a9dc..602fb305365f 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -111,7 +111,7 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
 	val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
 
 	if (val != RESOURCE_MAX)
-		jump_label_dec(&memcg_socket_limit_enabled);
+		static_key_slow_dec(&memcg_socket_limit_enabled);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
@@ -143,9 +143,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 				   net->ipv4.sysctl_tcp_mem[i]);
 
 	if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
-		jump_label_dec(&memcg_socket_limit_enabled);
+		static_key_slow_dec(&memcg_socket_limit_enabled);
 	else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
-		jump_label_inc(&memcg_socket_limit_enabled);
+		static_key_slow_inc(&memcg_socket_limit_enabled);
 
 	return 0;
 }
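
tcp_update_limit() only touches the key when the limit crosses the
RESOURCE_MAX boundary, i.e. on edges of the "a limit is set" condition, so
rewriting the same limit over and over is a no-op as far as the key's
enable count is concerned. The edge-triggered idiom boiled down (all names
here are hypothetical):

  static struct static_key limit_enabled = STATIC_KEY_INIT_FALSE;
  static u64 cur_lim = RESOURCE_MAX;  /* RESOURCE_MAX == no limit set */

  static void update_limit(u64 new_lim)
  {
      if (new_lim == RESOURCE_MAX && cur_lim != RESOURCE_MAX)
          static_key_slow_dec(&limit_enabled);  /* limit removed */
      else if (cur_lim == RESOURCE_MAX && new_lim != RESOURCE_MAX)
          static_key_slow_inc(&limit_enabled);  /* first limit set */
      cur_lim = new_lim;
  }
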
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index b4e8ff05b301..e1b7e051332e 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -56,7 +56,7 @@ struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
 
 #if defined(CONFIG_JUMP_LABEL)
-struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
 #endif
 
@@ -77,7 +77,7 @@ int nf_register_hook(struct nf_hook_ops *reg)
 	list_add_rcu(&reg->list, elem->list.prev);
 	mutex_unlock(&nf_hook_mutex);
 #if defined(CONFIG_JUMP_LABEL)
-	jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	return 0;
 }
@@ -89,7 +89,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
 	list_del_rcu(&reg->list);
 	mutex_unlock(&nf_hook_mutex);
 #if defined(CONFIG_JUMP_LABEL)
-	jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	synchronize_net();
 }
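
nf_hooks_needed is a whole array of keys, one per (protocol, hook) slot,
so the packet fast path can branch straight past (pf, hook) combinations
with nothing registered, without even touching the nf_hooks lists. The
consumer side (the companion change to include/linux/netfilter.h) has
roughly this shape; a sketch only, with the constant-index fallback
spelled out:

  static inline bool hooks_active(u_int8_t pf, unsigned int hook)
  {
      /* a static branch needs a key address fixed at compile time,
       * so only constant (pf, hook) pairs can use the patched jump;
       * everything else falls back to the plain list check */
      if (__builtin_constant_p(pf) && __builtin_constant_p(hook))
          return static_key_false(&nf_hooks_needed[pf][hook]);
      return !list_empty(&nf_hooks[pf][hook]);
  }
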