-rw-r--r--  arch/mips/include/asm/jump_label.h   |  22
-rw-r--r--  arch/sparc/include/asm/jump_label.h  |  25
-rw-r--r--  arch/x86/include/asm/alternative.h   |   3
-rw-r--r--  arch/x86/include/asm/jump_label.h    |  26
-rw-r--r--  arch/x86/kernel/alternative.c        |   2
-rw-r--r--  arch/x86/kernel/module.c             |   1
-rw-r--r--  include/asm-generic/vmlinux.lds.h    |  14
-rw-r--r--  include/linux/dynamic_debug.h        |   2
-rw-r--r--  include/linux/jump_label.h           |  89
-rw-r--r--  include/linux/jump_label_ref.h       |  44
-rw-r--r--  include/linux/perf_event.h           |  26
-rw-r--r--  include/linux/tracepoint.h           |  22
-rw-r--r--  kernel/jump_label.c                  | 539
-rw-r--r--  kernel/perf_event.c                  |   4
-rw-r--r--  kernel/tracepoint.c                  |  23
15 files changed, 356 insertions(+), 486 deletions(-)
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 7622ccf75076..1881b316ca45 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,16 +20,18 @@
 #define WORD_INSN ".word"
 #endif

-#define JUMP_LABEL(key, label) \
-        do { \
-                asm goto("1:\tnop\n\t" \
-                        "nop\n\t" \
-                        ".pushsection __jump_table, \"a\"\n\t" \
-                        WORD_INSN " 1b, %l[" #label "], %0\n\t" \
-                        ".popsection\n\t" \
-                        : : "i" (key) : : label); \
-        } while (0)
-
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+        asm goto("1:\tnop\n\t"
+                "nop\n\t"
+                ".pushsection __jump_table, \"aw\"\n\t"
+                WORD_INSN " 1b, %l[l_yes], %0\n\t"
+                ".popsection\n\t"
+                : : "i" (key) : : l_yes);
+        return false;
+l_yes:
+        return true;
+}

 #endif /* __KERNEL__ */

diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 427d4684e0d2..fc73a82366f8 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,17 +7,20 @@

 #define JUMP_LABEL_NOP_SIZE 4

-#define JUMP_LABEL(key, label) \
-        do { \
-                asm goto("1:\n\t" \
-                         "nop\n\t" \
-                         "nop\n\t" \
-                         ".pushsection __jump_table, \"a\"\n\t"\
-                         ".align 4\n\t" \
-                         ".word 1b, %l[" #label "], %c0\n\t" \
-                         ".popsection \n\t" \
-                         : : "i" (key) : : label);\
-        } while (0)
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+        asm goto("1:\n\t"
+                 "nop\n\t"
+                 "nop\n\t"
+                 ".pushsection __jump_table, \"aw\"\n\t"
+                 ".align 4\n\t"
+                 ".word 1b, %l[l_yes], %c0\n\t"
+                 ".popsection \n\t"
+                 : : "i" (key) : : l_yes);
+        return false;
+l_yes:
+        return true;
+}

 #endif /* __KERNEL__ */

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 13009d1af99a..8cdd1e247975 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -4,7 +4,6 @@
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
-#include <linux/jump_label.h>
 #include <asm/asm.h>

 /*
@@ -191,7 +190,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
 extern void text_poke_smp_batch(struct text_poke_param *params, int n);

-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL)
 #define IDEAL_NOP_SIZE_5 5
 extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
 extern void arch_init_ideal_nop5(void);
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 574dbc22893a..f217cee86533 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -5,20 +5,24 @@

 #include <linux/types.h>
 #include <asm/nops.h>
+#include <asm/asm.h>

 #define JUMP_LABEL_NOP_SIZE 5

-# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"

-# define JUMP_LABEL(key, label) \
-        do { \
-                asm goto("1:" \
-                        JUMP_LABEL_INITIAL_NOP \
-                        ".pushsection __jump_table, \"aw\" \n\t"\
-                        _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
-                        ".popsection \n\t" \
-                        : : "i" (key) : : label); \
-        } while (0)
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+        asm goto("1:"
+                JUMP_LABEL_INITIAL_NOP
+                ".pushsection __jump_table, \"aw\" \n\t"
+                _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
+                ".popsection \n\t"
+                : : "i" (key) : : l_yes);
+        return false;
+l_yes:
+        return true;
+}

 #endif /* __KERNEL__ */

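Note on the arch implementations above: besides placing the initial 5-byte nop/jmp at the call site, each arch_static_branch() records a triple -- the site address ("1b"), the slow-path target (%l[l_yes]) and the key address (%c0) -- into the __jump_table section. Roughly, each record has the shape sketched below; this is an illustration only, the real layout is the arch's struct jump_entry built from jump_label_t fields.

    /* Illustration: one __jump_table record emitted per static_branch() site. */
    struct jump_entry_model {
            unsigned long code;     /* address of the nop/jmp placed at "1:" */
            unsigned long target;   /* address of the l_yes slow path        */
            unsigned long key;      /* address of the struct jump_label_key  */
    };

The core code later sorts these records by key so that all sites sharing a key end up contiguous (see kernel/jump_label.c below).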
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4a234677e213..651454b0c811 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -679,7 +679,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
         __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
 }

-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL)

 #ifdef CONFIG_X86_64
 unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index ab23f1ad4bf1..52f256f2cc81 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -24,6 +24,7 @@
 #include <linux/bug.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
+#include <linux/jump_label.h>

 #include <asm/system.h>
 #include <asm/page.h>
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 32c45e5fe0ab..79522166d7f1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -170,6 +170,10 @@
                 STRUCT_ALIGN(); \
                 *(__tracepoints) \
                 /* implement dynamic printk debug */ \
+                . = ALIGN(8); \
+                VMLINUX_SYMBOL(__start___jump_table) = .; \
+                *(__jump_table) \
+                VMLINUX_SYMBOL(__stop___jump_table) = .; \
                 . = ALIGN(8); \
                 VMLINUX_SYMBOL(__start___verbose) = .; \
                 *(__verbose) \
@@ -228,8 +232,6 @@
         \
         BUG_TABLE \
         \
-        JUMP_TABLE \
-        \
         /* PCI quirks */ \
         .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -589,14 +591,6 @@
 #define BUG_TABLE
 #endif

-#define JUMP_TABLE \
-        . = ALIGN(8); \
-        __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
-                VMLINUX_SYMBOL(__start___jump_table) = .; \
-                *(__jump_table) \
-                VMLINUX_SYMBOL(__stop___jump_table) = .; \
-        }
-
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA \
         . = ALIGN(4); \
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 0c9653f11c18..e747ecd48e1c 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,8 +1,6 @@
 #ifndef _DYNAMIC_DEBUG_H
 #define _DYNAMIC_DEBUG_H

-#include <linux/jump_label.h>
-
 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
  * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
  * use independent hash functions, to reduce the chance of false positives.
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 7880f18e4b86..83e745f3ead7 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,20 +1,43 @@
 #ifndef _LINUX_JUMP_LABEL_H
 #define _LINUX_JUMP_LABEL_H

+#include <linux/types.h>
+#include <linux/compiler.h>
+
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+
+struct jump_label_key {
+        atomic_t enabled;
+        struct jump_entry *entries;
+#ifdef CONFIG_MODULES
+        struct jump_label_mod *next;
+#endif
+};
+
 # include <asm/jump_label.h>
 # define HAVE_JUMP_LABEL
 #endif

 enum jump_label_type {
+        JUMP_LABEL_DISABLE = 0,
         JUMP_LABEL_ENABLE,
-        JUMP_LABEL_DISABLE
 };

 struct module;

 #ifdef HAVE_JUMP_LABEL

+#ifdef CONFIG_MODULES
+#define JUMP_LABEL_INIT {{ 0 }, NULL, NULL}
+#else
+#define JUMP_LABEL_INIT {{ 0 }, NULL}
+#endif
+
+static __always_inline bool static_branch(struct jump_label_key *key)
+{
+        return arch_static_branch(key);
+}
+
 extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];

@@ -23,37 +46,37 @@ extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
                                       enum jump_label_type type);
 extern void arch_jump_label_text_poke_early(jump_label_t addr);
-extern void jump_label_update(unsigned long key, enum jump_label_type type);
-extern void jump_label_apply_nops(struct module *mod);
 extern int jump_label_text_reserved(void *start, void *end);
+extern void jump_label_inc(struct jump_label_key *key);
+extern void jump_label_dec(struct jump_label_key *key);
+extern bool jump_label_enabled(struct jump_label_key *key);
+extern void jump_label_apply_nops(struct module *mod);

-#define jump_label_enable(key) \
-        jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
+#else

-#define jump_label_disable(key) \
-        jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
+#include <asm/atomic.h>

-#else
+#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}

-#define JUMP_LABEL(key, label) \
-do { \
-        if (unlikely(*key)) \
-                goto label; \
-} while (0)
+struct jump_label_key {
+        atomic_t enabled;
+};

-#define jump_label_enable(cond_var) \
-do { \
-        *(cond_var) = 1; \
-} while (0)
+static __always_inline bool static_branch(struct jump_label_key *key)
+{
+        if (unlikely(atomic_read(&key->enabled)))
+                return true;
+        return false;
+}

-#define jump_label_disable(cond_var) \
-do { \
-        *(cond_var) = 0; \
-} while (0)
+static inline void jump_label_inc(struct jump_label_key *key)
+{
+        atomic_inc(&key->enabled);
+}

-static inline int jump_label_apply_nops(struct module *mod)
+static inline void jump_label_dec(struct jump_label_key *key)
 {
-        return 0;
+        atomic_dec(&key->enabled);
 }

 static inline int jump_label_text_reserved(void *start, void *end)
@@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end)
 static inline void jump_label_lock(void) {}
 static inline void jump_label_unlock(void) {}

-#endif
+static inline bool jump_label_enabled(struct jump_label_key *key)
+{
+        return !!atomic_read(&key->enabled);
+}

-#define COND_STMT(key, stmt) \
-do { \
-        __label__ jl_enabled; \
-        JUMP_LABEL(key, jl_enabled); \
-        if (0) { \
-jl_enabled: \
-                stmt; \
-        } \
-} while (0)
+static inline int jump_label_apply_nops(struct module *mod)
+{
+        return 0;
+}
+
+#endif

 #endif
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h
deleted file mode 100644
index e5d012ad92c6..000000000000
--- a/include/linux/jump_label_ref.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _LINUX_JUMP_LABEL_REF_H
-#define _LINUX_JUMP_LABEL_REF_H
-
-#include <linux/jump_label.h>
-#include <asm/atomic.h>
-
-#ifdef HAVE_JUMP_LABEL
-
-static inline void jump_label_inc(atomic_t *key)
-{
-        if (atomic_add_return(1, key) == 1)
-                jump_label_enable(key);
-}
-
-static inline void jump_label_dec(atomic_t *key)
-{
-        if (atomic_dec_and_test(key))
-                jump_label_disable(key);
-}
-
-#else /* !HAVE_JUMP_LABEL */
-
-static inline void jump_label_inc(atomic_t *key)
-{
-        atomic_inc(key);
-}
-
-static inline void jump_label_dec(atomic_t *key)
-{
-        atomic_dec(key);
-}
-
-#undef JUMP_LABEL
-#define JUMP_LABEL(key, label) \
-do { \
-        if (unlikely(__builtin_choose_expr( \
-                __builtin_types_compatible_p(typeof(key), atomic_t *), \
-                atomic_read((atomic_t *)(key)), *(key)))) \
-                goto label; \
-} while (0)
-
-#endif /* HAVE_JUMP_LABEL */
-
-#endif /* _LINUX_JUMP_LABEL_REF_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 311b4dc785a1..730b7821690f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -505,7 +505,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label_ref.h>
+#include <linux/jump_label.h>
 #include <asm/atomic.h>
 #include <asm/local.h>

@@ -1034,7 +1034,7 @@ static inline int is_software_event(struct perf_event *event)
         return event->pmu->task_ctx_nr == perf_sw_context;
 }

-extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

@@ -1063,22 +1063,21 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
         struct pt_regs hot_regs;

-        JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
-        return;
-
-have_event:
-        if (!regs) {
-                perf_fetch_caller_regs(&hot_regs);
-                regs = &hot_regs;
+        if (static_branch(&perf_swevent_enabled[event_id])) {
+                if (!regs) {
+                        perf_fetch_caller_regs(&hot_regs);
+                        regs = &hot_regs;
+                }
+                __perf_sw_event(event_id, nr, nmi, regs, addr);
         }
-        __perf_sw_event(event_id, nr, nmi, regs, addr);
 }

-extern atomic_t perf_sched_events;
+extern struct jump_label_key perf_sched_events;

 static inline void perf_event_task_sched_in(struct task_struct *task)
 {
-        COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
+        if (static_branch(&perf_sched_events))
+                __perf_event_task_sched_in(task);
 }

 static inline
@@ -1086,7 +1085,8 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex
 {
         perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

-        COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
+        if (static_branch(&perf_sched_events))
+                __perf_event_task_sched_out(task, next);
 }

 extern void perf_event_mmap(struct vm_area_struct *vma);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 97c84a58efb8..d530a4460a0b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -29,7 +29,7 @@ struct tracepoint_func {

 struct tracepoint {
         const char *name;               /* Tracepoint name */
-        int state;                      /* State. */
+        struct jump_label_key key;
         void (*regfunc)(void);
         void (*unregfunc)(void);
         struct tracepoint_func __rcu *funcs;
@@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
         extern struct tracepoint __tracepoint_##name; \
         static inline void trace_##name(proto) \
         { \
-                JUMP_LABEL(&__tracepoint_##name.state, do_trace); \
-                return; \
-do_trace: \
+                if (static_branch(&__tracepoint_##name.key)) \
                 __DO_TRACE(&__tracepoint_##name, \
                         TP_PROTO(data_proto), \
                         TP_ARGS(data_args), \
@@ -176,14 +174,14 @@ do_trace:
  * structures, so we create an array of pointers that will be used for iteration
  * on the tracepoints.
  */
 #define DEFINE_TRACE_FN(name, reg, unreg) \
         static const char __tpstrtab_##name[] \
         __attribute__((section("__tracepoints_strings"))) = #name; \
         struct tracepoint __tracepoint_##name \
         __attribute__((section("__tracepoints"))) = \
-                { __tpstrtab_##name, 0, reg, unreg, NULL }; \
+                { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
         static struct tracepoint * const __tracepoint_ptr_##name __used \
         __attribute__((section("__tracepoints_ptrs"))) = \
                 &__tracepoint_##name;

 #define DEFINE_TRACE(name) \
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 3b79bd938330..74d1c099fbd1 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -2,43 +2,23 @@
  * jump label support
  *
  * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
  *
  */
-#include <linux/jump_label.h>
 #include <linux/memory.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/list.h>
-#include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <linux/err.h>
+#include <linux/jump_label.h>

 #ifdef HAVE_JUMP_LABEL

-#define JUMP_LABEL_HASH_BITS 6
-#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
-static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
-
 /* mutex to protect coming/going of the the jump_label table */
 static DEFINE_MUTEX(jump_label_mutex);

-struct jump_label_entry {
-        struct hlist_node hlist;
-        struct jump_entry *table;
-        int nr_entries;
-        /* hang modules off here */
-        struct hlist_head modules;
-        unsigned long key;
-};
-
-struct jump_label_module_entry {
-        struct hlist_node hlist;
-        struct jump_entry *table;
-        int nr_entries;
-        struct module *mod;
-};
-
 void jump_label_lock(void)
 {
         mutex_lock(&jump_label_mutex);
@@ -49,6 +29,11 @@ void jump_label_unlock(void)
         mutex_unlock(&jump_label_mutex);
 }

+bool jump_label_enabled(struct jump_label_key *key)
+{
+        return !!atomic_read(&key->enabled);
+}
+
 static int jump_label_cmp(const void *a, const void *b)
 {
         const struct jump_entry *jea = a;
@@ -64,7 +49,7 @@ static int jump_label_cmp(const void *a, const void *b)
 }

 static void
-sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
+jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 {
         unsigned long size;

@@ -73,118 +58,25 @@ sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
         sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }

-static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
-{
-        struct hlist_head *head;
-        struct hlist_node *node;
-        struct jump_label_entry *e;
-        u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
-
-        head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
-        hlist_for_each_entry(e, node, head, hlist) {
-                if (key == e->key)
-                        return e;
-        }
-        return NULL;
-}
+static void jump_label_update(struct jump_label_key *key, int enable);

-static struct jump_label_entry *
-add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
+void jump_label_inc(struct jump_label_key *key)
 {
-        struct hlist_head *head;
-        struct jump_label_entry *e;
-        u32 hash;
-
-        e = get_jump_label_entry(key);
-        if (e)
-                return ERR_PTR(-EEXIST);
-
-        e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
-        if (!e)
-                return ERR_PTR(-ENOMEM);
-
-        hash = jhash((void *)&key, sizeof(jump_label_t), 0);
-        head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
-        e->key = key;
-        e->table = table;
-        e->nr_entries = nr_entries;
-        INIT_HLIST_HEAD(&(e->modules));
-        hlist_add_head(&e->hlist, head);
-        return e;
-}
+        if (atomic_inc_not_zero(&key->enabled))
+                return;

-static int
-build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
-{
-        struct jump_entry *iter, *iter_begin;
-        struct jump_label_entry *entry;
-        int count;
-
-        sort_jump_label_entries(start, stop);
-        iter = start;
-        while (iter < stop) {
-                entry = get_jump_label_entry(iter->key);
-                if (!entry) {
-                        iter_begin = iter;
-                        count = 0;
-                        while ((iter < stop) &&
-                                (iter->key == iter_begin->key)) {
-                                iter++;
-                                count++;
-                        }
-                        entry = add_jump_label_entry(iter_begin->key,
-                                                        count, iter_begin);
-                        if (IS_ERR(entry))
-                                return PTR_ERR(entry);
-                } else {
-                        WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n");
-                        return -1;
-                }
-        }
-        return 0;
+        jump_label_lock();
+        if (atomic_add_return(1, &key->enabled) == 1)
+                jump_label_update(key, JUMP_LABEL_ENABLE);
+        jump_label_unlock();
 }

-/***
- * jump_label_update - update jump label text
- * @key -  key value associated with a a jump label
- * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
- *
- * Will enable/disable the jump for jump label @key, depending on the
- * value of @type.
- *
- */
-
-void jump_label_update(unsigned long key, enum jump_label_type type)
+void jump_label_dec(struct jump_label_key *key)
 {
-        struct jump_entry *iter;
-        struct jump_label_entry *entry;
-        struct hlist_node *module_node;
-        struct jump_label_module_entry *e_module;
-        int count;
+        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
+                return;

-        jump_label_lock();
-        entry = get_jump_label_entry((jump_label_t)key);
-        if (entry) {
-                count = entry->nr_entries;
-                iter = entry->table;
-                while (count--) {
-                        if (kernel_text_address(iter->code))
-                                arch_jump_label_transform(iter, type);
-                        iter++;
-                }
-                /* eanble/disable jump labels in modules */
-                hlist_for_each_entry(e_module, module_node, &(entry->modules),
-                                                        hlist) {
-                        count = e_module->nr_entries;
-                        iter = e_module->table;
-                        while (count--) {
-                                if (iter->key &&
-                                                kernel_text_address(iter->code))
-                                        arch_jump_label_transform(iter, type);
-                                iter++;
-                        }
-                }
-        }
+        jump_label_update(key, JUMP_LABEL_DISABLE);
         jump_label_unlock();
 }

@@ -197,77 +89,33 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end)
         return 0;
 }

-#ifdef CONFIG_MODULES
-
-static int module_conflict(void *start, void *end)
+static int __jump_label_text_reserved(struct jump_entry *iter_start,
+                struct jump_entry *iter_stop, void *start, void *end)
 {
-        struct hlist_head *head;
-        struct hlist_node *node, *node_next, *module_node, *module_node_next;
-        struct jump_label_entry *e;
-        struct jump_label_module_entry *e_module;
         struct jump_entry *iter;
-        int i, count;
-        int conflict = 0;
-
-        for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
-                head = &jump_label_table[i];
-                hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
-                        hlist_for_each_entry_safe(e_module, module_node,
-                                                        module_node_next,
-                                                        &(e->modules), hlist) {
-                                count = e_module->nr_entries;
-                                iter = e_module->table;
-                                while (count--) {
-                                        if (addr_conflict(iter, start, end)) {
-                                                conflict = 1;
-                                                goto out;
-                                        }
-                                        iter++;
-                                }
-                        }
-                }
-        }
-out:
-        return conflict;
-}
-
-#endif
-
-/***
- * jump_label_text_reserved - check if addr range is reserved
- * @start: start text addr
- * @end: end text addr
- *
- * checks if the text addr located between @start and @end
- * overlaps with any of the jump label patch addresses. Code
- * that wants to modify kernel text should first verify that
- * it does not overlap with any of the jump label addresses.
- * Caller must hold jump_label_mutex.
- *
- * returns 1 if there is an overlap, 0 otherwise
- */
-int jump_label_text_reserved(void *start, void *end)
-{
-        struct jump_entry *iter;
-        struct jump_entry *iter_start = __start___jump_table;
-        struct jump_entry *iter_stop = __start___jump_table;
-        int conflict = 0;

         iter = iter_start;
         while (iter < iter_stop) {
-                if (addr_conflict(iter, start, end)) {
-                        conflict = 1;
-                        goto out;
-                }
+                if (addr_conflict(iter, start, end))
+                        return 1;
                 iter++;
         }

-        /* now check modules */
-#ifdef CONFIG_MODULES
-        conflict = module_conflict(start, end);
-#endif
-out:
-        return conflict;
+        return 0;
+}
+
+static void __jump_label_update(struct jump_label_key *key,
+                struct jump_entry *entry, int enable)
+{
+        for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+                /*
+                 * entry->code set to 0 invalidates module init text sections
+                 * kernel_text_address() verifies we are not in core kernel
+                 * init code, see jump_label_invalidate_module_init().
+                 */
+                if (entry->code && kernel_text_address(entry->code))
+                        arch_jump_label_transform(entry, enable);
+        }
 }

 /*
@@ -277,142 +125,173 @@ void __weak arch_jump_label_text_poke_early(jump_label_t addr)
 {
 }

-static __init int init_jump_label(void)
+static __init int jump_label_init(void)
 {
-        int ret;
         struct jump_entry *iter_start = __start___jump_table;
         struct jump_entry *iter_stop = __stop___jump_table;
+        struct jump_label_key *key = NULL;
         struct jump_entry *iter;

         jump_label_lock();
-        ret = build_jump_label_hashtable(__start___jump_table,
-                                         __stop___jump_table);
-        iter = iter_start;
-        while (iter < iter_stop) {
+        jump_label_sort_entries(iter_start, iter_stop);
+
+        for (iter = iter_start; iter < iter_stop; iter++) {
                 arch_jump_label_text_poke_early(iter->code);
-                iter++;
+                if (iter->key == (jump_label_t)(unsigned long)key)
+                        continue;
+
+                key = (struct jump_label_key *)(unsigned long)iter->key;
+                atomic_set(&key->enabled, 0);
+                key->entries = iter;
+#ifdef CONFIG_MODULES
+                key->next = NULL;
+#endif
         }
         jump_label_unlock();
-        return ret;
+
+        return 0;
 }
-early_initcall(init_jump_label);
+early_initcall(jump_label_init);

 #ifdef CONFIG_MODULES

-static struct jump_label_module_entry *
-add_jump_label_module_entry(struct jump_label_entry *entry,
-                            struct jump_entry *iter_begin,
-                            int count, struct module *mod)
+struct jump_label_mod {
+        struct jump_label_mod *next;
+        struct jump_entry *entries;
+        struct module *mod;
+};
+
+static int __jump_label_mod_text_reserved(void *start, void *end)
+{
+        struct module *mod;
+
+        mod = __module_text_address((unsigned long)start);
+        if (!mod)
+                return 0;
+
+        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+
+        return __jump_label_text_reserved(mod->jump_entries,
+                                mod->jump_entries + mod->num_jump_entries,
+                                start, end);
+}
+
+static void __jump_label_mod_update(struct jump_label_key *key, int enable)
+{
+        struct jump_label_mod *mod = key->next;
+
+        while (mod) {
+                __jump_label_update(key, mod->entries, enable);
+                mod = mod->next;
+        }
+}
+
+/***
+ * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
+ * @mod: module to patch
+ *
+ * Allow for run-time selection of the optimal nops. Before the module
+ * loads patch these with arch_get_jump_label_nop(), which is specified by
+ * the arch specific jump label code.
+ */
+void jump_label_apply_nops(struct module *mod)
 {
-        struct jump_label_module_entry *e;
-
-        e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
-        if (!e)
-                return ERR_PTR(-ENOMEM);
-        e->mod = mod;
-        e->nr_entries = count;
-        e->table = iter_begin;
-        hlist_add_head(&e->hlist, &entry->modules);
-        return e;
+        struct jump_entry *iter_start = mod->jump_entries;
+        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+        struct jump_entry *iter;
+
+        /* if the module doesn't have jump label entries, just return */
+        if (iter_start == iter_stop)
+                return;
+
+        for (iter = iter_start; iter < iter_stop; iter++)
+                arch_jump_label_text_poke_early(iter->code);
 }

-static int add_jump_label_module(struct module *mod)
+static int jump_label_add_module(struct module *mod)
 {
-        struct jump_entry *iter, *iter_begin;
-        struct jump_label_entry *entry;
-        struct jump_label_module_entry *module_entry;
-        int count;
+        struct jump_entry *iter_start = mod->jump_entries;
+        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+        struct jump_entry *iter;
+        struct jump_label_key *key = NULL;
+        struct jump_label_mod *jlm;

         /* if the module doesn't have jump label entries, just return */
-        if (!mod->num_jump_entries)
+        if (iter_start == iter_stop)
                 return 0;

-        sort_jump_label_entries(mod->jump_entries,
-                                mod->jump_entries + mod->num_jump_entries);
-        iter = mod->jump_entries;
-        while (iter < mod->jump_entries + mod->num_jump_entries) {
-                entry = get_jump_label_entry(iter->key);
-                iter_begin = iter;
-                count = 0;
-                while ((iter < mod->jump_entries + mod->num_jump_entries) &&
-                        (iter->key == iter_begin->key)) {
-                        iter++;
-                        count++;
-                }
-                if (!entry) {
-                        entry = add_jump_label_entry(iter_begin->key, 0, NULL);
-                        if (IS_ERR(entry))
-                                return PTR_ERR(entry);
+        jump_label_sort_entries(iter_start, iter_stop);
+
+        for (iter = iter_start; iter < iter_stop; iter++) {
+                if (iter->key == (jump_label_t)(unsigned long)key)
+                        continue;
+
+                key = (struct jump_label_key *)(unsigned long)iter->key;
+
+                if (__module_address(iter->key) == mod) {
+                        atomic_set(&key->enabled, 0);
+                        key->entries = iter;
+                        key->next = NULL;
+                        continue;
                 }
-                module_entry = add_jump_label_module_entry(entry, iter_begin,
-                                                           count, mod);
-                if (IS_ERR(module_entry))
-                        return PTR_ERR(module_entry);
+
+                jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
+                if (!jlm)
+                        return -ENOMEM;
+
+                jlm->mod = mod;
+                jlm->entries = iter;
+                jlm->next = key->next;
+                key->next = jlm;
+
+                if (jump_label_enabled(key))
+                        __jump_label_update(key, iter, JUMP_LABEL_ENABLE);
         }
+
         return 0;
 }

-static void remove_jump_label_module(struct module *mod)
+static void jump_label_del_module(struct module *mod)
 {
-        struct hlist_head *head;
-        struct hlist_node *node, *node_next, *module_node, *module_node_next;
-        struct jump_label_entry *e;
-        struct jump_label_module_entry *e_module;
-        int i;
+        struct jump_entry *iter_start = mod->jump_entries;
+        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+        struct jump_entry *iter;
+        struct jump_label_key *key = NULL;
+        struct jump_label_mod *jlm, **prev;

-        /* if the module doesn't have jump label entries, just return */
-        if (!mod->num_jump_entries)
-                return;
+        for (iter = iter_start; iter < iter_stop; iter++) {
+                if (iter->key == (jump_label_t)(unsigned long)key)
+                        continue;
+
+                key = (struct jump_label_key *)(unsigned long)iter->key;
+
+                if (__module_address(iter->key) == mod)
+                        continue;
+
+                prev = &key->next;
+                jlm = key->next;

-        for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
-                head = &jump_label_table[i];
-                hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
-                        hlist_for_each_entry_safe(e_module, module_node,
-                                                        module_node_next,
-                                                        &(e->modules), hlist) {
-                                if (e_module->mod == mod) {
-                                        hlist_del(&e_module->hlist);
-                                        kfree(e_module);
-                                }
-                        }
-                        if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
-                                hlist_del(&e->hlist);
-                                kfree(e);
-                        }
+                while (jlm && jlm->mod != mod) {
+                        prev = &jlm->next;
+                        jlm = jlm->next;
+                }
+
+                if (jlm) {
+                        *prev = jlm->next;
+                        kfree(jlm);
                 }
         }
 }

-static void remove_jump_label_module_init(struct module *mod)
+static void jump_label_invalidate_module_init(struct module *mod)
 {
-        struct hlist_head *head;
-        struct hlist_node *node, *node_next, *module_node, *module_node_next;
-        struct jump_label_entry *e;
-        struct jump_label_module_entry *e_module;
+        struct jump_entry *iter_start = mod->jump_entries;
+        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
         struct jump_entry *iter;
-        int i, count;
-
-        /* if the module doesn't have jump label entries, just return */
-        if (!mod->num_jump_entries)
-                return;

-        for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
-                head = &jump_label_table[i];
-                hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
-                        hlist_for_each_entry_safe(e_module, module_node,
-                                                        module_node_next,
-                                                        &(e->modules), hlist) {
-                                if (e_module->mod != mod)
-                                        continue;
-                                count = e_module->nr_entries;
-                                iter = e_module->table;
-                                while (count--) {
-                                        if (within_module_init(iter->code, mod))
-                                                iter->key = 0;
-                                        iter++;
-                                }
-                        }
-                }
+        for (iter = iter_start; iter < iter_stop; iter++) {
+                if (within_module_init(iter->code, mod))
+                        iter->code = 0;
         }
 }

@@ -426,59 +305,77 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
         switch (val) {
         case MODULE_STATE_COMING:
                 jump_label_lock();
-                ret = add_jump_label_module(mod);
+                ret = jump_label_add_module(mod);
                 if (ret)
-                        remove_jump_label_module(mod);
+                        jump_label_del_module(mod);
                 jump_label_unlock();
                 break;
         case MODULE_STATE_GOING:
                 jump_label_lock();
-                remove_jump_label_module(mod);
+                jump_label_del_module(mod);
                 jump_label_unlock();
                 break;
         case MODULE_STATE_LIVE:
                 jump_label_lock();
-                remove_jump_label_module_init(mod);
+                jump_label_invalidate_module_init(mod);
                 jump_label_unlock();
                 break;
         }
-        return ret;
-}

-/***
- * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
- * @mod: module to patch
- *
- * Allow for run-time selection of the optimal nops. Before the module
- * loads patch these with arch_get_jump_label_nop(), which is specified by
- * the arch specific jump label code.
- */
-void jump_label_apply_nops(struct module *mod)
-{
-        struct jump_entry *iter;
-
-        /* if the module doesn't have jump label entries, just return */
-        if (!mod->num_jump_entries)
-                return;
-
-        iter = mod->jump_entries;
-        while (iter < mod->jump_entries + mod->num_jump_entries) {
-                arch_jump_label_text_poke_early(iter->code);
-                iter++;
-        }
+        return notifier_from_errno(ret);
 }

 struct notifier_block jump_label_module_nb = {
         .notifier_call = jump_label_module_notify,
-        .priority = 0,
+        .priority = 1, /* higher than tracepoints */
 };

-static __init int init_jump_label_module(void)
+static __init int jump_label_init_module(void)
 {
         return register_module_notifier(&jump_label_module_nb);
 }
-early_initcall(init_jump_label_module);
+early_initcall(jump_label_init_module);

 #endif /* CONFIG_MODULES */

+/***
+ * jump_label_text_reserved - check if addr range is reserved
+ * @start: start text addr
+ * @end: end text addr
+ *
+ * checks if the text addr located between @start and @end
+ * overlaps with any of the jump label patch addresses. Code
+ * that wants to modify kernel text should first verify that
+ * it does not overlap with any of the jump label addresses.
+ * Caller must hold jump_label_mutex.
+ *
+ * returns 1 if there is an overlap, 0 otherwise
+ */
+int jump_label_text_reserved(void *start, void *end)
+{
+        int ret = __jump_label_text_reserved(__start___jump_table,
+                        __stop___jump_table, start, end);
+
+        if (ret)
+                return ret;
+
+#ifdef CONFIG_MODULES
+        ret = __jump_label_mod_text_reserved(start, end);
+#endif
+        return ret;
+}
+
+static void jump_label_update(struct jump_label_key *key, int enable)
+{
+        struct jump_entry *entry = key->entries;
+
+        /* if there are no users, entry can be NULL */
+        if (entry)
+                __jump_label_update(key, entry, enable);
+
+#ifdef CONFIG_MODULES
+        __jump_label_mod_update(key, enable);
+#endif
+}
+
 #endif
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c75925c4d1e2..d665e92fbd44 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -125,7 +125,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-atomic_t perf_sched_events __read_mostly;
+struct jump_label_key perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

 static atomic_t nr_mmap_events __read_mostly;
@@ -5417,7 +5417,7 @@ fail:
         return err;
 }

-atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

 static void sw_perf_event_destroy(struct perf_event *event)
 {
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 68187af4889e..b219f1449c54 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 {
         WARN_ON(strcmp((*entry)->name, elem->name) != 0);

-        if (elem->regfunc && !elem->state && active)
+        if (elem->regfunc && !jump_label_enabled(&elem->key) && active)
                 elem->regfunc();
-        else if (elem->unregfunc && elem->state && !active)
+        else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active)
                 elem->unregfunc();

         /*
@@ -264,13 +264,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
          * is used.
          */
         rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-        if (!elem->state && active) {
-                jump_label_enable(&elem->state);
-                elem->state = active;
-        } else if (elem->state && !active) {
-                jump_label_disable(&elem->state);
-                elem->state = active;
-        }
+        if (active && !jump_label_enabled(&elem->key))
+                jump_label_inc(&elem->key);
+        else if (!active && jump_label_enabled(&elem->key))
+                jump_label_dec(&elem->key);
 }

 /*
@@ -281,13 +278,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
  */
 static void disable_tracepoint(struct tracepoint *elem)
 {
-        if (elem->unregfunc && elem->state)
+        if (elem->unregfunc && jump_label_enabled(&elem->key))
                 elem->unregfunc();

-        if (elem->state) {
-                jump_label_disable(&elem->state);
-                elem->state = 0;
-        }
+        if (jump_label_enabled(&elem->key))
+                jump_label_dec(&elem->key);
         rcu_assign_pointer(elem->funcs, NULL);
 }
