diff options
-rw-r--r-- | arch/mips/include/asm/jump_label.h | 22 | ||||
-rw-r--r-- | arch/s390/Kconfig | 1 | ||||
-rw-r--r-- | arch/s390/include/asm/jump_label.h | 37 | ||||
-rw-r--r-- | arch/s390/kernel/Makefile | 2 | ||||
-rw-r--r-- | arch/s390/kernel/jump_label.c | 59 | ||||
-rw-r--r-- | arch/sparc/include/asm/jump_label.h | 25 | ||||
-rw-r--r-- | arch/x86/include/asm/alternative.h | 3 | ||||
-rw-r--r-- | arch/x86/include/asm/jump_label.h | 27 | ||||
-rw-r--r-- | arch/x86/kernel/alternative.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/module.c | 1 | ||||
-rw-r--r-- | include/asm-generic/vmlinux.lds.h | 14 | ||||
-rw-r--r-- | include/linux/dynamic_debug.h | 2 | ||||
-rw-r--r-- | include/linux/jump_label.h | 89 | ||||
-rw-r--r-- | include/linux/jump_label_ref.h | 44 | ||||
-rw-r--r-- | include/linux/perf_event.h | 23 | ||||
-rw-r--r-- | include/linux/tracepoint.h | 22 | ||||
-rw-r--r-- | kernel/jump_label.c | 539 | ||||
-rw-r--r-- | kernel/perf_event.c | 4 | ||||
-rw-r--r-- | kernel/trace/trace.c | 15 | ||||
-rw-r--r-- | kernel/trace/trace_output.c | 3 | ||||
-rw-r--r-- | kernel/trace/trace_printk.c | 120 | ||||
-rw-r--r-- | kernel/tracepoint.c | 23 |
22 files changed, 576 insertions, 501 deletions
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index 7622ccf75076..1881b316ca45 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h | |||
@@ -20,16 +20,18 @@ | |||
20 | #define WORD_INSN ".word" | 20 | #define WORD_INSN ".word" |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define JUMP_LABEL(key, label) \ | 23 | static __always_inline bool arch_static_branch(struct jump_label_key *key) |
24 | do { \ | 24 | { |
25 | asm goto("1:\tnop\n\t" \ | 25 | asm goto("1:\tnop\n\t" |
26 | "nop\n\t" \ | 26 | "nop\n\t" |
27 | ".pushsection __jump_table, \"a\"\n\t" \ | 27 | ".pushsection __jump_table, \"aw\"\n\t" |
28 | WORD_INSN " 1b, %l[" #label "], %0\n\t" \ | 28 | WORD_INSN " 1b, %l[l_yes], %0\n\t" |
29 | ".popsection\n\t" \ | 29 | ".popsection\n\t" |
30 | : : "i" (key) : : label); \ | 30 | : : "i" (key) : : l_yes); |
31 | } while (0) | 31 | return false; |
32 | 32 | l_yes: | |
33 | return true; | ||
34 | } | ||
33 | 35 | ||
34 | #endif /* __KERNEL__ */ | 36 | #endif /* __KERNEL__ */ |
35 | 37 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2508a6f31588..4a7f14079e03 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -88,6 +88,7 @@ config S390 | |||
88 | select HAVE_KERNEL_XZ | 88 | select HAVE_KERNEL_XZ |
89 | select HAVE_GET_USER_PAGES_FAST | 89 | select HAVE_GET_USER_PAGES_FAST |
90 | select HAVE_ARCH_MUTEX_CPU_RELAX | 90 | select HAVE_ARCH_MUTEX_CPU_RELAX |
91 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 | ||
91 | select ARCH_INLINE_SPIN_TRYLOCK | 92 | select ARCH_INLINE_SPIN_TRYLOCK |
92 | select ARCH_INLINE_SPIN_TRYLOCK_BH | 93 | select ARCH_INLINE_SPIN_TRYLOCK_BH |
93 | select ARCH_INLINE_SPIN_LOCK | 94 | select ARCH_INLINE_SPIN_LOCK |
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h new file mode 100644 index 000000000000..95a6cf2b5b67 --- /dev/null +++ b/arch/s390/include/asm/jump_label.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _ASM_S390_JUMP_LABEL_H | ||
2 | #define _ASM_S390_JUMP_LABEL_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | #define JUMP_LABEL_NOP_SIZE 6 | ||
7 | |||
8 | #ifdef CONFIG_64BIT | ||
9 | #define ASM_PTR ".quad" | ||
10 | #define ASM_ALIGN ".balign 8" | ||
11 | #else | ||
12 | #define ASM_PTR ".long" | ||
13 | #define ASM_ALIGN ".balign 4" | ||
14 | #endif | ||
15 | |||
16 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | ||
17 | { | ||
18 | asm goto("0: brcl 0,0\n" | ||
19 | ".pushsection __jump_table, \"aw\"\n" | ||
20 | ASM_ALIGN "\n" | ||
21 | ASM_PTR " 0b, %l[label], %0\n" | ||
22 | ".popsection\n" | ||
23 | : : "X" (key) : : label); | ||
24 | return false; | ||
25 | label: | ||
26 | return true; | ||
27 | } | ||
28 | |||
29 | typedef unsigned long jump_label_t; | ||
30 | |||
31 | struct jump_entry { | ||
32 | jump_label_t code; | ||
33 | jump_label_t target; | ||
34 | jump_label_t key; | ||
35 | }; | ||
36 | |||
37 | #endif | ||
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 64230bc392fa..5ff15dacb571 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | |||
23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ | 23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ |
24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
25 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ | 25 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ |
26 | vdso.o vtime.o sysinfo.o nmi.o sclp.o | 26 | vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o |
27 | 27 | ||
28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c new file mode 100644 index 000000000000..44cc06bedf77 --- /dev/null +++ b/arch/s390/kernel/jump_label.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Jump label s390 support | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/uaccess.h> | ||
9 | #include <linux/stop_machine.h> | ||
10 | #include <linux/jump_label.h> | ||
11 | #include <asm/ipl.h> | ||
12 | |||
13 | #ifdef HAVE_JUMP_LABEL | ||
14 | |||
15 | struct insn { | ||
16 | u16 opcode; | ||
17 | s32 offset; | ||
18 | } __packed; | ||
19 | |||
20 | struct insn_args { | ||
21 | unsigned long *target; | ||
22 | struct insn *insn; | ||
23 | ssize_t size; | ||
24 | }; | ||
25 | |||
26 | static int __arch_jump_label_transform(void *data) | ||
27 | { | ||
28 | struct insn_args *args = data; | ||
29 | int rc; | ||
30 | |||
31 | rc = probe_kernel_write(args->target, args->insn, args->size); | ||
32 | WARN_ON_ONCE(rc < 0); | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | void arch_jump_label_transform(struct jump_entry *entry, | ||
37 | enum jump_label_type type) | ||
38 | { | ||
39 | struct insn_args args; | ||
40 | struct insn insn; | ||
41 | |||
42 | if (type == JUMP_LABEL_ENABLE) { | ||
43 | /* brcl 15,offset */ | ||
44 | insn.opcode = 0xc0f4; | ||
45 | insn.offset = (entry->target - entry->code) >> 1; | ||
46 | } else { | ||
47 | /* brcl 0,0 */ | ||
48 | insn.opcode = 0xc004; | ||
49 | insn.offset = 0; | ||
50 | } | ||
51 | |||
52 | args.target = (void *) entry->code; | ||
53 | args.insn = &insn; | ||
54 | args.size = JUMP_LABEL_NOP_SIZE; | ||
55 | |||
56 | stop_machine(__arch_jump_label_transform, &args, NULL); | ||
57 | } | ||
58 | |||
59 | #endif | ||
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h index 427d4684e0d2..fc73a82366f8 100644 --- a/arch/sparc/include/asm/jump_label.h +++ b/arch/sparc/include/asm/jump_label.h | |||
@@ -7,17 +7,20 @@ | |||
7 | 7 | ||
8 | #define JUMP_LABEL_NOP_SIZE 4 | 8 | #define JUMP_LABEL_NOP_SIZE 4 |
9 | 9 | ||
10 | #define JUMP_LABEL(key, label) \ | 10 | static __always_inline bool arch_static_branch(struct jump_label_key *key) |
11 | do { \ | 11 | { |
12 | asm goto("1:\n\t" \ | 12 | asm goto("1:\n\t" |
13 | "nop\n\t" \ | 13 | "nop\n\t" |
14 | "nop\n\t" \ | 14 | "nop\n\t" |
15 | ".pushsection __jump_table, \"a\"\n\t"\ | 15 | ".pushsection __jump_table, \"aw\"\n\t" |
16 | ".align 4\n\t" \ | 16 | ".align 4\n\t" |
17 | ".word 1b, %l[" #label "], %c0\n\t" \ | 17 | ".word 1b, %l[l_yes], %c0\n\t" |
18 | ".popsection \n\t" \ | 18 | ".popsection \n\t" |
19 | : : "i" (key) : : label);\ | 19 | : : "i" (key) : : l_yes); |
20 | } while (0) | 20 | return false; |
21 | l_yes: | ||
22 | return true; | ||
23 | } | ||
21 | 24 | ||
22 | #endif /* __KERNEL__ */ | 25 | #endif /* __KERNEL__ */ |
23 | 26 | ||
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 13009d1af99a..8cdd1e247975 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -4,7 +4,6 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/stringify.h> | 6 | #include <linux/stringify.h> |
7 | #include <linux/jump_label.h> | ||
8 | #include <asm/asm.h> | 7 | #include <asm/asm.h> |
9 | 8 | ||
10 | /* | 9 | /* |
@@ -191,7 +190,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); | |||
191 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); | 190 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); |
192 | extern void text_poke_smp_batch(struct text_poke_param *params, int n); | 191 | extern void text_poke_smp_batch(struct text_poke_param *params, int n); |
193 | 192 | ||
194 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | 193 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) |
195 | #define IDEAL_NOP_SIZE_5 5 | 194 | #define IDEAL_NOP_SIZE_5 5 |
196 | extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; | 195 | extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; |
197 | extern void arch_init_ideal_nop5(void); | 196 | extern void arch_init_ideal_nop5(void); |
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index 574dbc22893a..a32b18ce6ead 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h | |||
@@ -5,20 +5,25 @@ | |||
5 | 5 | ||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <asm/nops.h> | 7 | #include <asm/nops.h> |
8 | #include <asm/asm.h> | ||
8 | 9 | ||
9 | #define JUMP_LABEL_NOP_SIZE 5 | 10 | #define JUMP_LABEL_NOP_SIZE 5 |
10 | 11 | ||
11 | # define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" | 12 | #define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" |
12 | 13 | ||
13 | # define JUMP_LABEL(key, label) \ | 14 | static __always_inline bool arch_static_branch(struct jump_label_key *key) |
14 | do { \ | 15 | { |
15 | asm goto("1:" \ | 16 | asm goto("1:" |
16 | JUMP_LABEL_INITIAL_NOP \ | 17 | JUMP_LABEL_INITIAL_NOP |
17 | ".pushsection __jump_table, \"aw\" \n\t"\ | 18 | ".pushsection __jump_table, \"aw\" \n\t" |
18 | _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ | 19 | _ASM_ALIGN "\n\t" |
19 | ".popsection \n\t" \ | 20 | _ASM_PTR "1b, %l[l_yes], %c0 \n\t" |
20 | : : "i" (key) : : label); \ | 21 | ".popsection \n\t" |
21 | } while (0) | 22 | : : "i" (key) : : l_yes); |
23 | return false; | ||
24 | l_yes: | ||
25 | return true; | ||
26 | } | ||
22 | 27 | ||
23 | #endif /* __KERNEL__ */ | 28 | #endif /* __KERNEL__ */ |
24 | 29 | ||
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 4a234677e213..651454b0c811 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -679,7 +679,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) | |||
679 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); | 679 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); |
680 | } | 680 | } |
681 | 681 | ||
682 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | 682 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) |
683 | 683 | ||
684 | #ifdef CONFIG_X86_64 | 684 | #ifdef CONFIG_X86_64 |
685 | unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; | 685 | unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; |
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index ab23f1ad4bf1..52f256f2cc81 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/gfp.h> | 26 | #include <linux/gfp.h> |
27 | #include <linux/jump_label.h> | ||
27 | 28 | ||
28 | #include <asm/system.h> | 29 | #include <asm/system.h> |
29 | #include <asm/page.h> | 30 | #include <asm/page.h> |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index bd297a20ab98..75a8692d144f 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -170,6 +170,10 @@ | |||
170 | STRUCT_ALIGN(); \ | 170 | STRUCT_ALIGN(); \ |
171 | *(__tracepoints) \ | 171 | *(__tracepoints) \ |
172 | /* implement dynamic printk debug */ \ | 172 | /* implement dynamic printk debug */ \ |
173 | . = ALIGN(8); \ | ||
174 | VMLINUX_SYMBOL(__start___jump_table) = .; \ | ||
175 | *(__jump_table) \ | ||
176 | VMLINUX_SYMBOL(__stop___jump_table) = .; \ | ||
173 | . = ALIGN(8); \ | 177 | . = ALIGN(8); \ |
174 | VMLINUX_SYMBOL(__start___verbose) = .; \ | 178 | VMLINUX_SYMBOL(__start___verbose) = .; \ |
175 | *(__verbose) \ | 179 | *(__verbose) \ |
@@ -228,8 +232,6 @@ | |||
228 | \ | 232 | \ |
229 | BUG_TABLE \ | 233 | BUG_TABLE \ |
230 | \ | 234 | \ |
231 | JUMP_TABLE \ | ||
232 | \ | ||
233 | /* PCI quirks */ \ | 235 | /* PCI quirks */ \ |
234 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ | 236 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ |
235 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ | 237 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ |
@@ -589,14 +591,6 @@ | |||
589 | #define BUG_TABLE | 591 | #define BUG_TABLE |
590 | #endif | 592 | #endif |
591 | 593 | ||
592 | #define JUMP_TABLE \ | ||
593 | . = ALIGN(8); \ | ||
594 | __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ | ||
595 | VMLINUX_SYMBOL(__start___jump_table) = .; \ | ||
596 | *(__jump_table) \ | ||
597 | VMLINUX_SYMBOL(__stop___jump_table) = .; \ | ||
598 | } | ||
599 | |||
600 | #ifdef CONFIG_PM_TRACE | 594 | #ifdef CONFIG_PM_TRACE |
601 | #define TRACEDATA \ | 595 | #define TRACEDATA \ |
602 | . = ALIGN(4); \ | 596 | . = ALIGN(4); \ |
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 0c9653f11c18..e747ecd48e1c 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _DYNAMIC_DEBUG_H | 1 | #ifndef _DYNAMIC_DEBUG_H |
2 | #define _DYNAMIC_DEBUG_H | 2 | #define _DYNAMIC_DEBUG_H |
3 | 3 | ||
4 | #include <linux/jump_label.h> | ||
5 | |||
6 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which | 4 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which |
7 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They | 5 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They |
8 | * use independent hash functions, to reduce the chance of false positives. | 6 | * use independent hash functions, to reduce the chance of false positives. |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7880f18e4b86..83e745f3ead7 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -1,20 +1,43 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_H | 1 | #ifndef _LINUX_JUMP_LABEL_H |
2 | #define _LINUX_JUMP_LABEL_H | 2 | #define _LINUX_JUMP_LABEL_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
4 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) | 7 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) |
8 | |||
9 | struct jump_label_key { | ||
10 | atomic_t enabled; | ||
11 | struct jump_entry *entries; | ||
12 | #ifdef CONFIG_MODULES | ||
13 | struct jump_label_mod *next; | ||
14 | #endif | ||
15 | }; | ||
16 | |||
5 | # include <asm/jump_label.h> | 17 | # include <asm/jump_label.h> |
6 | # define HAVE_JUMP_LABEL | 18 | # define HAVE_JUMP_LABEL |
7 | #endif | 19 | #endif |
8 | 20 | ||
9 | enum jump_label_type { | 21 | enum jump_label_type { |
22 | JUMP_LABEL_DISABLE = 0, | ||
10 | JUMP_LABEL_ENABLE, | 23 | JUMP_LABEL_ENABLE, |
11 | JUMP_LABEL_DISABLE | ||
12 | }; | 24 | }; |
13 | 25 | ||
14 | struct module; | 26 | struct module; |
15 | 27 | ||
16 | #ifdef HAVE_JUMP_LABEL | 28 | #ifdef HAVE_JUMP_LABEL |
17 | 29 | ||
30 | #ifdef CONFIG_MODULES | ||
31 | #define JUMP_LABEL_INIT {{ 0 }, NULL, NULL} | ||
32 | #else | ||
33 | #define JUMP_LABEL_INIT {{ 0 }, NULL} | ||
34 | #endif | ||
35 | |||
36 | static __always_inline bool static_branch(struct jump_label_key *key) | ||
37 | { | ||
38 | return arch_static_branch(key); | ||
39 | } | ||
40 | |||
18 | extern struct jump_entry __start___jump_table[]; | 41 | extern struct jump_entry __start___jump_table[]; |
19 | extern struct jump_entry __stop___jump_table[]; | 42 | extern struct jump_entry __stop___jump_table[]; |
20 | 43 | ||
@@ -23,37 +46,37 @@ extern void jump_label_unlock(void); | |||
23 | extern void arch_jump_label_transform(struct jump_entry *entry, | 46 | extern void arch_jump_label_transform(struct jump_entry *entry, |
24 | enum jump_label_type type); | 47 | enum jump_label_type type); |
25 | extern void arch_jump_label_text_poke_early(jump_label_t addr); | 48 | extern void arch_jump_label_text_poke_early(jump_label_t addr); |
26 | extern void jump_label_update(unsigned long key, enum jump_label_type type); | ||
27 | extern void jump_label_apply_nops(struct module *mod); | ||
28 | extern int jump_label_text_reserved(void *start, void *end); | 49 | extern int jump_label_text_reserved(void *start, void *end); |
50 | extern void jump_label_inc(struct jump_label_key *key); | ||
51 | extern void jump_label_dec(struct jump_label_key *key); | ||
52 | extern bool jump_label_enabled(struct jump_label_key *key); | ||
53 | extern void jump_label_apply_nops(struct module *mod); | ||
29 | 54 | ||
30 | #define jump_label_enable(key) \ | 55 | #else |
31 | jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); | ||
32 | 56 | ||
33 | #define jump_label_disable(key) \ | 57 | #include <asm/atomic.h> |
34 | jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); | ||
35 | 58 | ||
36 | #else | 59 | #define JUMP_LABEL_INIT {ATOMIC_INIT(0)} |
37 | 60 | ||
38 | #define JUMP_LABEL(key, label) \ | 61 | struct jump_label_key { |
39 | do { \ | 62 | atomic_t enabled; |
40 | if (unlikely(*key)) \ | 63 | }; |
41 | goto label; \ | ||
42 | } while (0) | ||
43 | 64 | ||
44 | #define jump_label_enable(cond_var) \ | 65 | static __always_inline bool static_branch(struct jump_label_key *key) |
45 | do { \ | 66 | { |
46 | *(cond_var) = 1; \ | 67 | if (unlikely(atomic_read(&key->enabled))) |
47 | } while (0) | 68 | return true; |
69 | return false; | ||
70 | } | ||
48 | 71 | ||
49 | #define jump_label_disable(cond_var) \ | 72 | static inline void jump_label_inc(struct jump_label_key *key) |
50 | do { \ | 73 | { |
51 | *(cond_var) = 0; \ | 74 | atomic_inc(&key->enabled); |
52 | } while (0) | 75 | } |
53 | 76 | ||
54 | static inline int jump_label_apply_nops(struct module *mod) | 77 | static inline void jump_label_dec(struct jump_label_key *key) |
55 | { | 78 | { |
56 | return 0; | 79 | atomic_dec(&key->enabled); |
57 | } | 80 | } |
58 | 81 | ||
59 | static inline int jump_label_text_reserved(void *start, void *end) | 82 | static inline int jump_label_text_reserved(void *start, void *end) |
@@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end) | |||
64 | static inline void jump_label_lock(void) {} | 87 | static inline void jump_label_lock(void) {} |
65 | static inline void jump_label_unlock(void) {} | 88 | static inline void jump_label_unlock(void) {} |
66 | 89 | ||
67 | #endif | 90 | static inline bool jump_label_enabled(struct jump_label_key *key) |
91 | { | ||
92 | return !!atomic_read(&key->enabled); | ||
93 | } | ||
68 | 94 | ||
69 | #define COND_STMT(key, stmt) \ | 95 | static inline int jump_label_apply_nops(struct module *mod) |
70 | do { \ | 96 | { |
71 | __label__ jl_enabled; \ | 97 | return 0; |
72 | JUMP_LABEL(key, jl_enabled); \ | 98 | } |
73 | if (0) { \ | 99 | |
74 | jl_enabled: \ | 100 | #endif |
75 | stmt; \ | ||
76 | } \ | ||
77 | } while (0) | ||
78 | 101 | ||
79 | #endif | 102 | #endif |
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h deleted file mode 100644 index e5d012ad92c6..000000000000 --- a/include/linux/jump_label_ref.h +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_REF_H | ||
2 | #define _LINUX_JUMP_LABEL_REF_H | ||
3 | |||
4 | #include <linux/jump_label.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | #ifdef HAVE_JUMP_LABEL | ||
8 | |||
9 | static inline void jump_label_inc(atomic_t *key) | ||
10 | { | ||
11 | if (atomic_add_return(1, key) == 1) | ||
12 | jump_label_enable(key); | ||
13 | } | ||
14 | |||
15 | static inline void jump_label_dec(atomic_t *key) | ||
16 | { | ||
17 | if (atomic_dec_and_test(key)) | ||
18 | jump_label_disable(key); | ||
19 | } | ||
20 | |||
21 | #else /* !HAVE_JUMP_LABEL */ | ||
22 | |||
23 | static inline void jump_label_inc(atomic_t *key) | ||
24 | { | ||
25 | atomic_inc(key); | ||
26 | } | ||
27 | |||
28 | static inline void jump_label_dec(atomic_t *key) | ||
29 | { | ||
30 | atomic_dec(key); | ||
31 | } | ||
32 | |||
33 | #undef JUMP_LABEL | ||
34 | #define JUMP_LABEL(key, label) \ | ||
35 | do { \ | ||
36 | if (unlikely(__builtin_choose_expr( \ | ||
37 | __builtin_types_compatible_p(typeof(key), atomic_t *), \ | ||
38 | atomic_read((atomic_t *)(key)), *(key)))) \ | ||
39 | goto label; \ | ||
40 | } while (0) | ||
41 | |||
42 | #endif /* HAVE_JUMP_LABEL */ | ||
43 | |||
44 | #endif /* _LINUX_JUMP_LABEL_REF_H */ | ||
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ee9f1e782800..9eec53d97370 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -505,7 +505,7 @@ struct perf_guest_info_callbacks { | |||
505 | #include <linux/ftrace.h> | 505 | #include <linux/ftrace.h> |
506 | #include <linux/cpu.h> | 506 | #include <linux/cpu.h> |
507 | #include <linux/irq_work.h> | 507 | #include <linux/irq_work.h> |
508 | #include <linux/jump_label_ref.h> | 508 | #include <linux/jump_label.h> |
509 | #include <asm/atomic.h> | 509 | #include <asm/atomic.h> |
510 | #include <asm/local.h> | 510 | #include <asm/local.h> |
511 | 511 | ||
@@ -1034,7 +1034,7 @@ static inline int is_software_event(struct perf_event *event) | |||
1034 | return event->pmu->task_ctx_nr == perf_sw_context; | 1034 | return event->pmu->task_ctx_nr == perf_sw_context; |
1035 | } | 1035 | } |
1036 | 1036 | ||
1037 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 1037 | extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
1038 | 1038 | ||
1039 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 1039 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); |
1040 | 1040 | ||
@@ -1063,22 +1063,21 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | |||
1063 | { | 1063 | { |
1064 | struct pt_regs hot_regs; | 1064 | struct pt_regs hot_regs; |
1065 | 1065 | ||
1066 | JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); | 1066 | if (static_branch(&perf_swevent_enabled[event_id])) { |
1067 | return; | 1067 | if (!regs) { |
1068 | 1068 | perf_fetch_caller_regs(&hot_regs); | |
1069 | have_event: | 1069 | regs = &hot_regs; |
1070 | if (!regs) { | 1070 | } |
1071 | perf_fetch_caller_regs(&hot_regs); | 1071 | __perf_sw_event(event_id, nr, nmi, regs, addr); |
1072 | regs = &hot_regs; | ||
1073 | } | 1072 | } |
1074 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
1075 | } | 1073 | } |
1076 | 1074 | ||
1077 | extern atomic_t perf_sched_events; | 1075 | extern struct jump_label_key perf_sched_events; |
1078 | 1076 | ||
1079 | static inline void perf_event_task_sched_in(struct task_struct *task) | 1077 | static inline void perf_event_task_sched_in(struct task_struct *task) |
1080 | { | 1078 | { |
1081 | COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); | 1079 | if (static_branch(&perf_sched_events)) |
1080 | __perf_event_task_sched_in(task); | ||
1082 | } | 1081 | } |
1083 | 1082 | ||
1084 | static inline | 1083 | static inline |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 97c84a58efb8..d530a4460a0b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -29,7 +29,7 @@ struct tracepoint_func { | |||
29 | 29 | ||
30 | struct tracepoint { | 30 | struct tracepoint { |
31 | const char *name; /* Tracepoint name */ | 31 | const char *name; /* Tracepoint name */ |
32 | int state; /* State. */ | 32 | struct jump_label_key key; |
33 | void (*regfunc)(void); | 33 | void (*regfunc)(void); |
34 | void (*unregfunc)(void); | 34 | void (*unregfunc)(void); |
35 | struct tracepoint_func __rcu *funcs; | 35 | struct tracepoint_func __rcu *funcs; |
@@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, | |||
146 | extern struct tracepoint __tracepoint_##name; \ | 146 | extern struct tracepoint __tracepoint_##name; \ |
147 | static inline void trace_##name(proto) \ | 147 | static inline void trace_##name(proto) \ |
148 | { \ | 148 | { \ |
149 | JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ | 149 | if (static_branch(&__tracepoint_##name.key)) \ |
150 | return; \ | ||
151 | do_trace: \ | ||
152 | __DO_TRACE(&__tracepoint_##name, \ | 150 | __DO_TRACE(&__tracepoint_##name, \ |
153 | TP_PROTO(data_proto), \ | 151 | TP_PROTO(data_proto), \ |
154 | TP_ARGS(data_args), \ | 152 | TP_ARGS(data_args), \ |
@@ -176,14 +174,14 @@ do_trace: \ | |||
176 | * structures, so we create an array of pointers that will be used for iteration | 174 | * structures, so we create an array of pointers that will be used for iteration |
177 | * on the tracepoints. | 175 | * on the tracepoints. |
178 | */ | 176 | */ |
179 | #define DEFINE_TRACE_FN(name, reg, unreg) \ | 177 | #define DEFINE_TRACE_FN(name, reg, unreg) \ |
180 | static const char __tpstrtab_##name[] \ | 178 | static const char __tpstrtab_##name[] \ |
181 | __attribute__((section("__tracepoints_strings"))) = #name; \ | 179 | __attribute__((section("__tracepoints_strings"))) = #name; \ |
182 | struct tracepoint __tracepoint_##name \ | 180 | struct tracepoint __tracepoint_##name \ |
183 | __attribute__((section("__tracepoints"))) = \ | 181 | __attribute__((section("__tracepoints"))) = \ |
184 | { __tpstrtab_##name, 0, reg, unreg, NULL }; \ | 182 | { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ |
185 | static struct tracepoint * const __tracepoint_ptr_##name __used \ | 183 | static struct tracepoint * const __tracepoint_ptr_##name __used \ |
186 | __attribute__((section("__tracepoints_ptrs"))) = \ | 184 | __attribute__((section("__tracepoints_ptrs"))) = \ |
187 | &__tracepoint_##name; | 185 | &__tracepoint_##name; |
188 | 186 | ||
189 | #define DEFINE_TRACE(name) \ | 187 | #define DEFINE_TRACE(name) \ |
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 3b79bd938330..74d1c099fbd1 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -2,43 +2,23 @@ | |||
2 | * jump label support | 2 | * jump label support |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> | 4 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> |
5 | * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com> | ||
5 | * | 6 | * |
6 | */ | 7 | */ |
7 | #include <linux/jump_label.h> | ||
8 | #include <linux/memory.h> | 8 | #include <linux/memory.h> |
9 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/jhash.h> | ||
13 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
14 | #include <linux/sort.h> | 13 | #include <linux/sort.h> |
15 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/jump_label.h> | ||
16 | 16 | ||
17 | #ifdef HAVE_JUMP_LABEL | 17 | #ifdef HAVE_JUMP_LABEL |
18 | 18 | ||
19 | #define JUMP_LABEL_HASH_BITS 6 | ||
20 | #define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) | ||
21 | static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; | ||
22 | |||
23 | /* mutex to protect coming/going of the the jump_label table */ | 19 | /* mutex to protect coming/going of the the jump_label table */ |
24 | static DEFINE_MUTEX(jump_label_mutex); | 20 | static DEFINE_MUTEX(jump_label_mutex); |
25 | 21 | ||
26 | struct jump_label_entry { | ||
27 | struct hlist_node hlist; | ||
28 | struct jump_entry *table; | ||
29 | int nr_entries; | ||
30 | /* hang modules off here */ | ||
31 | struct hlist_head modules; | ||
32 | unsigned long key; | ||
33 | }; | ||
34 | |||
35 | struct jump_label_module_entry { | ||
36 | struct hlist_node hlist; | ||
37 | struct jump_entry *table; | ||
38 | int nr_entries; | ||
39 | struct module *mod; | ||
40 | }; | ||
41 | |||
42 | void jump_label_lock(void) | 22 | void jump_label_lock(void) |
43 | { | 23 | { |
44 | mutex_lock(&jump_label_mutex); | 24 | mutex_lock(&jump_label_mutex); |
@@ -49,6 +29,11 @@ void jump_label_unlock(void) | |||
49 | mutex_unlock(&jump_label_mutex); | 29 | mutex_unlock(&jump_label_mutex); |
50 | } | 30 | } |
51 | 31 | ||
32 | bool jump_label_enabled(struct jump_label_key *key) | ||
33 | { | ||
34 | return !!atomic_read(&key->enabled); | ||
35 | } | ||
36 | |||
52 | static int jump_label_cmp(const void *a, const void *b) | 37 | static int jump_label_cmp(const void *a, const void *b) |
53 | { | 38 | { |
54 | const struct jump_entry *jea = a; | 39 | const struct jump_entry *jea = a; |
@@ -64,7 +49,7 @@ static int jump_label_cmp(const void *a, const void *b) | |||
64 | } | 49 | } |
65 | 50 | ||
66 | static void | 51 | static void |
67 | sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) | 52 | jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) |
68 | { | 53 | { |
69 | unsigned long size; | 54 | unsigned long size; |
70 | 55 | ||
@@ -73,118 +58,25 @@ sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) | |||
73 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); | 58 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); |
74 | } | 59 | } |
75 | 60 | ||
76 | static struct jump_label_entry *get_jump_label_entry(jump_label_t key) | 61 | static void jump_label_update(struct jump_label_key *key, int enable); |
77 | { | ||
78 | struct hlist_head *head; | ||
79 | struct hlist_node *node; | ||
80 | struct jump_label_entry *e; | ||
81 | u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); | ||
82 | |||
83 | head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; | ||
84 | hlist_for_each_entry(e, node, head, hlist) { | ||
85 | if (key == e->key) | ||
86 | return e; | ||
87 | } | ||
88 | return NULL; | ||
89 | } | ||
90 | 62 | ||
91 | static struct jump_label_entry * | 63 | void jump_label_inc(struct jump_label_key *key) |
92 | add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) | ||
93 | { | 64 | { |
94 | struct hlist_head *head; | 65 | if (atomic_inc_not_zero(&key->enabled)) |
95 | struct jump_label_entry *e; | 66 | return; |
96 | u32 hash; | ||
97 | |||
98 | e = get_jump_label_entry(key); | ||
99 | if (e) | ||
100 | return ERR_PTR(-EEXIST); | ||
101 | |||
102 | e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); | ||
103 | if (!e) | ||
104 | return ERR_PTR(-ENOMEM); | ||
105 | |||
106 | hash = jhash((void *)&key, sizeof(jump_label_t), 0); | ||
107 | head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; | ||
108 | e->key = key; | ||
109 | e->table = table; | ||
110 | e->nr_entries = nr_entries; | ||
111 | INIT_HLIST_HEAD(&(e->modules)); | ||
112 | hlist_add_head(&e->hlist, head); | ||
113 | return e; | ||
114 | } | ||
115 | 67 | ||
116 | static int | 68 | jump_label_lock(); |
117 | build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) | 69 | if (atomic_add_return(1, &key->enabled) == 1) |
118 | { | 70 | jump_label_update(key, JUMP_LABEL_ENABLE); |
119 | struct jump_entry *iter, *iter_begin; | 71 | jump_label_unlock(); |
120 | struct jump_label_entry *entry; | ||
121 | int count; | ||
122 | |||
123 | sort_jump_label_entries(start, stop); | ||
124 | iter = start; | ||
125 | while (iter < stop) { | ||
126 | entry = get_jump_label_entry(iter->key); | ||
127 | if (!entry) { | ||
128 | iter_begin = iter; | ||
129 | count = 0; | ||
130 | while ((iter < stop) && | ||
131 | (iter->key == iter_begin->key)) { | ||
132 | iter++; | ||
133 | count++; | ||
134 | } | ||
135 | entry = add_jump_label_entry(iter_begin->key, | ||
136 | count, iter_begin); | ||
137 | if (IS_ERR(entry)) | ||
138 | return PTR_ERR(entry); | ||
139 | } else { | ||
140 | WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); | ||
141 | return -1; | ||
142 | } | ||
143 | } | ||
144 | return 0; | ||
145 | } | 72 | } |
146 | 73 | ||
147 | /*** | 74 | void jump_label_dec(struct jump_label_key *key) |
148 | * jump_label_update - update jump label text | ||
149 | * @key - key value associated with a jump label | ||
150 | * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE | ||
151 | * | ||
152 | * Will enable/disable the jump for jump label @key, depending on the | ||
153 | * value of @type. | ||
154 | * | ||
155 | */ | ||
156 | |||
157 | void jump_label_update(unsigned long key, enum jump_label_type type) | ||
158 | { | 75 | { |
159 | struct jump_entry *iter; | 76 | if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) |
160 | struct jump_label_entry *entry; | 77 | return; |
161 | struct hlist_node *module_node; | ||
162 | struct jump_label_module_entry *e_module; | ||
163 | int count; | ||
164 | 78 | ||
165 | jump_label_lock(); | 79 | jump_label_update(key, JUMP_LABEL_DISABLE); |
166 | entry = get_jump_label_entry((jump_label_t)key); | ||
167 | if (entry) { | ||
168 | count = entry->nr_entries; | ||
169 | iter = entry->table; | ||
170 | while (count--) { | ||
171 | if (kernel_text_address(iter->code)) | ||
172 | arch_jump_label_transform(iter, type); | ||
173 | iter++; | ||
174 | } | ||
175 | /* enable/disable jump labels in modules */ | ||
176 | hlist_for_each_entry(e_module, module_node, &(entry->modules), | ||
177 | hlist) { | ||
178 | count = e_module->nr_entries; | ||
179 | iter = e_module->table; | ||
180 | while (count--) { | ||
181 | if (iter->key && | ||
182 | kernel_text_address(iter->code)) | ||
183 | arch_jump_label_transform(iter, type); | ||
184 | iter++; | ||
185 | } | ||
186 | } | ||
187 | } | ||
188 | jump_label_unlock(); | 80 | jump_label_unlock(); |
189 | } | 81 | } |
190 | 82 | ||
@@ -197,77 +89,33 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end) | |||
197 | return 0; | 89 | return 0; |
198 | } | 90 | } |
199 | 91 | ||
200 | #ifdef CONFIG_MODULES | 92 | static int __jump_label_text_reserved(struct jump_entry *iter_start, |
201 | 93 | struct jump_entry *iter_stop, void *start, void *end) | |
202 | static int module_conflict(void *start, void *end) | ||
203 | { | 94 | { |
204 | struct hlist_head *head; | ||
205 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | ||
206 | struct jump_label_entry *e; | ||
207 | struct jump_label_module_entry *e_module; | ||
208 | struct jump_entry *iter; | 95 | struct jump_entry *iter; |
209 | int i, count; | ||
210 | int conflict = 0; | ||
211 | |||
212 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | ||
213 | head = &jump_label_table[i]; | ||
214 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | ||
215 | hlist_for_each_entry_safe(e_module, module_node, | ||
216 | module_node_next, | ||
217 | &(e->modules), hlist) { | ||
218 | count = e_module->nr_entries; | ||
219 | iter = e_module->table; | ||
220 | while (count--) { | ||
221 | if (addr_conflict(iter, start, end)) { | ||
222 | conflict = 1; | ||
223 | goto out; | ||
224 | } | ||
225 | iter++; | ||
226 | } | ||
227 | } | ||
228 | } | ||
229 | } | ||
230 | out: | ||
231 | return conflict; | ||
232 | } | ||
233 | |||
234 | #endif | ||
235 | |||
236 | /*** | ||
237 | * jump_label_text_reserved - check if addr range is reserved | ||
238 | * @start: start text addr | ||
239 | * @end: end text addr | ||
240 | * | ||
241 | * checks if the text addr located between @start and @end | ||
242 | * overlaps with any of the jump label patch addresses. Code | ||
243 | * that wants to modify kernel text should first verify that | ||
244 | * it does not overlap with any of the jump label addresses. | ||
245 | * Caller must hold jump_label_mutex. | ||
246 | * | ||
247 | * returns 1 if there is an overlap, 0 otherwise | ||
248 | */ | ||
249 | int jump_label_text_reserved(void *start, void *end) | ||
250 | { | ||
251 | struct jump_entry *iter; | ||
252 | struct jump_entry *iter_start = __start___jump_table; | ||
253 | struct jump_entry *iter_stop = __start___jump_table; | ||
254 | int conflict = 0; | ||
255 | 96 | ||
256 | iter = iter_start; | 97 | iter = iter_start; |
257 | while (iter < iter_stop) { | 98 | while (iter < iter_stop) { |
258 | if (addr_conflict(iter, start, end)) { | 99 | if (addr_conflict(iter, start, end)) |
259 | conflict = 1; | 100 | return 1; |
260 | goto out; | ||
261 | } | ||
262 | iter++; | 101 | iter++; |
263 | } | 102 | } |
264 | 103 | ||
265 | /* now check modules */ | 104 | return 0; |
266 | #ifdef CONFIG_MODULES | 105 | } |
267 | conflict = module_conflict(start, end); | 106 | |
268 | #endif | 107 | static void __jump_label_update(struct jump_label_key *key, |
269 | out: | 108 | struct jump_entry *entry, int enable) |
270 | return conflict; | 109 | { |
110 | for (; entry->key == (jump_label_t)(unsigned long)key; entry++) { | ||
111 | /* | ||
112 | * entry->code set to 0 invalidates module init text sections | ||
113 | * kernel_text_address() verifies we are not in core kernel | ||
114 | * init code, see jump_label_invalidate_module_init(). | ||
115 | */ | ||
116 | if (entry->code && kernel_text_address(entry->code)) | ||
117 | arch_jump_label_transform(entry, enable); | ||
118 | } | ||
271 | } | 119 | } |
272 | 120 | ||
273 | /* | 121 | /* |
@@ -277,142 +125,173 @@ void __weak arch_jump_label_text_poke_early(jump_label_t addr) | |||
277 | { | 125 | { |
278 | } | 126 | } |
279 | 127 | ||
280 | static __init int init_jump_label(void) | 128 | static __init int jump_label_init(void) |
281 | { | 129 | { |
282 | int ret; | ||
283 | struct jump_entry *iter_start = __start___jump_table; | 130 | struct jump_entry *iter_start = __start___jump_table; |
284 | struct jump_entry *iter_stop = __stop___jump_table; | 131 | struct jump_entry *iter_stop = __stop___jump_table; |
132 | struct jump_label_key *key = NULL; | ||
285 | struct jump_entry *iter; | 133 | struct jump_entry *iter; |
286 | 134 | ||
287 | jump_label_lock(); | 135 | jump_label_lock(); |
288 | ret = build_jump_label_hashtable(__start___jump_table, | 136 | jump_label_sort_entries(iter_start, iter_stop); |
289 | __stop___jump_table); | 137 | |
290 | iter = iter_start; | 138 | for (iter = iter_start; iter < iter_stop; iter++) { |
291 | while (iter < iter_stop) { | ||
292 | arch_jump_label_text_poke_early(iter->code); | 139 | arch_jump_label_text_poke_early(iter->code); |
293 | iter++; | 140 | if (iter->key == (jump_label_t)(unsigned long)key) |
141 | continue; | ||
142 | |||
143 | key = (struct jump_label_key *)(unsigned long)iter->key; | ||
144 | atomic_set(&key->enabled, 0); | ||
145 | key->entries = iter; | ||
146 | #ifdef CONFIG_MODULES | ||
147 | key->next = NULL; | ||
148 | #endif | ||
294 | } | 149 | } |
295 | jump_label_unlock(); | 150 | jump_label_unlock(); |
296 | return ret; | 151 | |
152 | return 0; | ||
297 | } | 153 | } |
298 | early_initcall(init_jump_label); | 154 | early_initcall(jump_label_init); |
299 | 155 | ||
300 | #ifdef CONFIG_MODULES | 156 | #ifdef CONFIG_MODULES |
301 | 157 | ||
302 | static struct jump_label_module_entry * | 158 | struct jump_label_mod { |
303 | add_jump_label_module_entry(struct jump_label_entry *entry, | 159 | struct jump_label_mod *next; |
304 | struct jump_entry *iter_begin, | 160 | struct jump_entry *entries; |
305 | int count, struct module *mod) | 161 | struct module *mod; |
162 | }; | ||
163 | |||
164 | static int __jump_label_mod_text_reserved(void *start, void *end) | ||
165 | { | ||
166 | struct module *mod; | ||
167 | |||
168 | mod = __module_text_address((unsigned long)start); | ||
169 | if (!mod) | ||
170 | return 0; | ||
171 | |||
172 | WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); | ||
173 | |||
174 | return __jump_label_text_reserved(mod->jump_entries, | ||
175 | mod->jump_entries + mod->num_jump_entries, | ||
176 | start, end); | ||
177 | } | ||
178 | |||
179 | static void __jump_label_mod_update(struct jump_label_key *key, int enable) | ||
180 | { | ||
181 | struct jump_label_mod *mod = key->next; | ||
182 | |||
183 | while (mod) { | ||
184 | __jump_label_update(key, mod->entries, enable); | ||
185 | mod = mod->next; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | /*** | ||
190 | * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop() | ||
191 | * @mod: module to patch | ||
192 | * | ||
193 | * Allow for run-time selection of the optimal nops. Before the module | ||
194 | * loads patch these with arch_get_jump_label_nop(), which is specified by | ||
195 | * the arch specific jump label code. | ||
196 | */ | ||
197 | void jump_label_apply_nops(struct module *mod) | ||
306 | { | 198 | { |
307 | struct jump_label_module_entry *e; | 199 | struct jump_entry *iter_start = mod->jump_entries; |
308 | 200 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; | |
309 | e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); | 201 | struct jump_entry *iter; |
310 | if (!e) | 202 | |
311 | return ERR_PTR(-ENOMEM); | 203 | /* if the module doesn't have jump label entries, just return */ |
312 | e->mod = mod; | 204 | if (iter_start == iter_stop) |
313 | e->nr_entries = count; | 205 | return; |
314 | e->table = iter_begin; | 206 | |
315 | hlist_add_head(&e->hlist, &entry->modules); | 207 | for (iter = iter_start; iter < iter_stop; iter++) |
316 | return e; | 208 | arch_jump_label_text_poke_early(iter->code); |
317 | } | 209 | } |
318 | 210 | ||
319 | static int add_jump_label_module(struct module *mod) | 211 | static int jump_label_add_module(struct module *mod) |
320 | { | 212 | { |
321 | struct jump_entry *iter, *iter_begin; | 213 | struct jump_entry *iter_start = mod->jump_entries; |
322 | struct jump_label_entry *entry; | 214 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
323 | struct jump_label_module_entry *module_entry; | 215 | struct jump_entry *iter; |
324 | int count; | 216 | struct jump_label_key *key = NULL; |
217 | struct jump_label_mod *jlm; | ||
325 | 218 | ||
326 | /* if the module doesn't have jump label entries, just return */ | 219 | /* if the module doesn't have jump label entries, just return */ |
327 | if (!mod->num_jump_entries) | 220 | if (iter_start == iter_stop) |
328 | return 0; | 221 | return 0; |
329 | 222 | ||
330 | sort_jump_label_entries(mod->jump_entries, | 223 | jump_label_sort_entries(iter_start, iter_stop); |
331 | mod->jump_entries + mod->num_jump_entries); | 224 | |
332 | iter = mod->jump_entries; | 225 | for (iter = iter_start; iter < iter_stop; iter++) { |
333 | while (iter < mod->jump_entries + mod->num_jump_entries) { | 226 | if (iter->key == (jump_label_t)(unsigned long)key) |
334 | entry = get_jump_label_entry(iter->key); | 227 | continue; |
335 | iter_begin = iter; | 228 | |
336 | count = 0; | 229 | key = (struct jump_label_key *)(unsigned long)iter->key; |
337 | while ((iter < mod->jump_entries + mod->num_jump_entries) && | 230 | |
338 | (iter->key == iter_begin->key)) { | 231 | if (__module_address(iter->key) == mod) { |
339 | iter++; | 232 | atomic_set(&key->enabled, 0); |
340 | count++; | 233 | key->entries = iter; |
341 | } | 234 | key->next = NULL; |
342 | if (!entry) { | 235 | continue; |
343 | entry = add_jump_label_entry(iter_begin->key, 0, NULL); | ||
344 | if (IS_ERR(entry)) | ||
345 | return PTR_ERR(entry); | ||
346 | } | 236 | } |
347 | module_entry = add_jump_label_module_entry(entry, iter_begin, | 237 | |
348 | count, mod); | 238 | jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); |
349 | if (IS_ERR(module_entry)) | 239 | if (!jlm) |
350 | return PTR_ERR(module_entry); | 240 | return -ENOMEM; |
241 | |||
242 | jlm->mod = mod; | ||
243 | jlm->entries = iter; | ||
244 | jlm->next = key->next; | ||
245 | key->next = jlm; | ||
246 | |||
247 | if (jump_label_enabled(key)) | ||
248 | __jump_label_update(key, iter, JUMP_LABEL_ENABLE); | ||
351 | } | 249 | } |
250 | |||
352 | return 0; | 251 | return 0; |
353 | } | 252 | } |
354 | 253 | ||
355 | static void remove_jump_label_module(struct module *mod) | 254 | static void jump_label_del_module(struct module *mod) |
356 | { | 255 | { |
357 | struct hlist_head *head; | 256 | struct jump_entry *iter_start = mod->jump_entries; |
358 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | 257 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
359 | struct jump_label_entry *e; | 258 | struct jump_entry *iter; |
360 | struct jump_label_module_entry *e_module; | 259 | struct jump_label_key *key = NULL; |
361 | int i; | 260 | struct jump_label_mod *jlm, **prev; |
362 | 261 | ||
363 | /* if the module doesn't have jump label entries, just return */ | 262 | for (iter = iter_start; iter < iter_stop; iter++) { |
364 | if (!mod->num_jump_entries) | 263 | if (iter->key == (jump_label_t)(unsigned long)key) |
365 | return; | 264 | continue; |
265 | |||
266 | key = (struct jump_label_key *)(unsigned long)iter->key; | ||
267 | |||
268 | if (__module_address(iter->key) == mod) | ||
269 | continue; | ||
270 | |||
271 | prev = &key->next; | ||
272 | jlm = key->next; | ||
366 | 273 | ||
367 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | 274 | while (jlm && jlm->mod != mod) { |
368 | head = &jump_label_table[i]; | 275 | prev = &jlm->next; |
369 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | 276 | jlm = jlm->next; |
370 | hlist_for_each_entry_safe(e_module, module_node, | 277 | } |
371 | module_node_next, | 278 | |
372 | &(e->modules), hlist) { | 279 | if (jlm) { |
373 | if (e_module->mod == mod) { | 280 | *prev = jlm->next; |
374 | hlist_del(&e_module->hlist); | 281 | kfree(jlm); |
375 | kfree(e_module); | ||
376 | } | ||
377 | } | ||
378 | if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { | ||
379 | hlist_del(&e->hlist); | ||
380 | kfree(e); | ||
381 | } | ||
382 | } | 282 | } |
383 | } | 283 | } |
384 | } | 284 | } |
385 | 285 | ||
386 | static void remove_jump_label_module_init(struct module *mod) | 286 | static void jump_label_invalidate_module_init(struct module *mod) |
387 | { | 287 | { |
388 | struct hlist_head *head; | 288 | struct jump_entry *iter_start = mod->jump_entries; |
389 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | 289 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
390 | struct jump_label_entry *e; | ||
391 | struct jump_label_module_entry *e_module; | ||
392 | struct jump_entry *iter; | 290 | struct jump_entry *iter; |
393 | int i, count; | ||
394 | |||
395 | /* if the module doesn't have jump label entries, just return */ | ||
396 | if (!mod->num_jump_entries) | ||
397 | return; | ||
398 | 291 | ||
399 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | 292 | for (iter = iter_start; iter < iter_stop; iter++) { |
400 | head = &jump_label_table[i]; | 293 | if (within_module_init(iter->code, mod)) |
401 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | 294 | iter->code = 0; |
402 | hlist_for_each_entry_safe(e_module, module_node, | ||
403 | module_node_next, | ||
404 | &(e->modules), hlist) { | ||
405 | if (e_module->mod != mod) | ||
406 | continue; | ||
407 | count = e_module->nr_entries; | ||
408 | iter = e_module->table; | ||
409 | while (count--) { | ||
410 | if (within_module_init(iter->code, mod)) | ||
411 | iter->key = 0; | ||
412 | iter++; | ||
413 | } | ||
414 | } | ||
415 | } | ||
416 | } | 295 | } |
417 | } | 296 | } |
418 | 297 | ||
@@ -426,59 +305,77 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, | |||
426 | switch (val) { | 305 | switch (val) { |
427 | case MODULE_STATE_COMING: | 306 | case MODULE_STATE_COMING: |
428 | jump_label_lock(); | 307 | jump_label_lock(); |
429 | ret = add_jump_label_module(mod); | 308 | ret = jump_label_add_module(mod); |
430 | if (ret) | 309 | if (ret) |
431 | remove_jump_label_module(mod); | 310 | jump_label_del_module(mod); |
432 | jump_label_unlock(); | 311 | jump_label_unlock(); |
433 | break; | 312 | break; |
434 | case MODULE_STATE_GOING: | 313 | case MODULE_STATE_GOING: |
435 | jump_label_lock(); | 314 | jump_label_lock(); |
436 | remove_jump_label_module(mod); | 315 | jump_label_del_module(mod); |
437 | jump_label_unlock(); | 316 | jump_label_unlock(); |
438 | break; | 317 | break; |
439 | case MODULE_STATE_LIVE: | 318 | case MODULE_STATE_LIVE: |
440 | jump_label_lock(); | 319 | jump_label_lock(); |
441 | remove_jump_label_module_init(mod); | 320 | jump_label_invalidate_module_init(mod); |
442 | jump_label_unlock(); | 321 | jump_label_unlock(); |
443 | break; | 322 | break; |
444 | } | 323 | } |
445 | return ret; | ||
446 | } | ||
447 | 324 | ||
448 | /*** | 325 | return notifier_from_errno(ret); |
449 | * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() | ||
450 | * @mod: module to patch | ||
451 | * | ||
452 | * Allow for run-time selection of the optimal nops. Before the module | ||
453 | * loads patch these with arch_get_jump_label_nop(), which is specified by | ||
454 | * the arch specific jump label code. | ||
455 | */ | ||
456 | void jump_label_apply_nops(struct module *mod) | ||
457 | { | ||
458 | struct jump_entry *iter; | ||
459 | |||
460 | /* if the module doesn't have jump label entries, just return */ | ||
461 | if (!mod->num_jump_entries) | ||
462 | return; | ||
463 | |||
464 | iter = mod->jump_entries; | ||
465 | while (iter < mod->jump_entries + mod->num_jump_entries) { | ||
466 | arch_jump_label_text_poke_early(iter->code); | ||
467 | iter++; | ||
468 | } | ||
469 | } | 326 | } |
470 | 327 | ||
471 | struct notifier_block jump_label_module_nb = { | 328 | struct notifier_block jump_label_module_nb = { |
472 | .notifier_call = jump_label_module_notify, | 329 | .notifier_call = jump_label_module_notify, |
473 | .priority = 0, | 330 | .priority = 1, /* higher than tracepoints */ |
474 | }; | 331 | }; |
475 | 332 | ||
476 | static __init int init_jump_label_module(void) | 333 | static __init int jump_label_init_module(void) |
477 | { | 334 | { |
478 | return register_module_notifier(&jump_label_module_nb); | 335 | return register_module_notifier(&jump_label_module_nb); |
479 | } | 336 | } |
480 | early_initcall(init_jump_label_module); | 337 | early_initcall(jump_label_init_module); |
481 | 338 | ||
482 | #endif /* CONFIG_MODULES */ | 339 | #endif /* CONFIG_MODULES */ |
483 | 340 | ||
341 | /*** | ||
342 | * jump_label_text_reserved - check if addr range is reserved | ||
343 | * @start: start text addr | ||
344 | * @end: end text addr | ||
345 | * | ||
346 | * checks if the text addr located between @start and @end | ||
347 | * overlaps with any of the jump label patch addresses. Code | ||
348 | * that wants to modify kernel text should first verify that | ||
349 | * it does not overlap with any of the jump label addresses. | ||
350 | * Caller must hold jump_label_mutex. | ||
351 | * | ||
352 | * returns 1 if there is an overlap, 0 otherwise | ||
353 | */ | ||
354 | int jump_label_text_reserved(void *start, void *end) | ||
355 | { | ||
356 | int ret = __jump_label_text_reserved(__start___jump_table, | ||
357 | __stop___jump_table, start, end); | ||
358 | |||
359 | if (ret) | ||
360 | return ret; | ||
361 | |||
362 | #ifdef CONFIG_MODULES | ||
363 | ret = __jump_label_mod_text_reserved(start, end); | ||
364 | #endif | ||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static void jump_label_update(struct jump_label_key *key, int enable) | ||
369 | { | ||
370 | struct jump_entry *entry = key->entries; | ||
371 | |||
372 | /* if there are no users, entry can be NULL */ | ||
373 | if (entry) | ||
374 | __jump_label_update(key, entry, enable); | ||
375 | |||
376 | #ifdef CONFIG_MODULES | ||
377 | __jump_label_mod_update(key, enable); | ||
378 | #endif | ||
379 | } | ||
380 | |||
484 | #endif | 381 | #endif |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8e81a9860a0d..440bc485bbff 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -125,7 +125,7 @@ enum event_type_t { | |||
125 | * perf_sched_events : >0 events exist | 125 | * perf_sched_events : >0 events exist |
126 | * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu | 126 | * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu |
127 | */ | 127 | */ |
128 | atomic_t perf_sched_events __read_mostly; | 128 | struct jump_label_key perf_sched_events __read_mostly; |
129 | static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); | 129 | static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); |
130 | 130 | ||
131 | static atomic_t nr_mmap_events __read_mostly; | 131 | static atomic_t nr_mmap_events __read_mostly; |
@@ -5429,7 +5429,7 @@ fail: | |||
5429 | return err; | 5429 | return err; |
5430 | } | 5430 | } |
5431 | 5431 | ||
5432 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 5432 | struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
5433 | 5433 | ||
5434 | static void sw_perf_event_destroy(struct perf_event *event) | 5434 | static void sw_perf_event_destroy(struct perf_event *event) |
5435 | { | 5435 | { |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d38c16a06a6f..e0e14ce0caab 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -2013,9 +2013,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
2013 | { | 2013 | { |
2014 | enum print_line_t ret; | 2014 | enum print_line_t ret; |
2015 | 2015 | ||
2016 | if (iter->lost_events) | 2016 | if (iter->lost_events && |
2017 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | 2017 | !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
2018 | iter->cpu, iter->lost_events); | 2018 | iter->cpu, iter->lost_events)) |
2019 | return TRACE_TYPE_PARTIAL_LINE; | ||
2019 | 2020 | ||
2020 | if (iter->trace && iter->trace->print_line) { | 2021 | if (iter->trace && iter->trace->print_line) { |
2021 | ret = iter->trace->print_line(iter); | 2022 | ret = iter->trace->print_line(iter); |
@@ -3229,6 +3230,14 @@ waitagain: | |||
3229 | 3230 | ||
3230 | if (iter->seq.len >= cnt) | 3231 | if (iter->seq.len >= cnt) |
3231 | break; | 3232 | break; |
3233 | |||
3234 | /* | ||
3235 | * Setting the full flag means we reached the trace_seq buffer | ||
3236 | * size and we should leave by partial output condition above. | ||
3237 | * One of the trace_seq_* functions is not used properly. | ||
3238 | */ | ||
3239 | WARN_ONCE(iter->seq.full, "full flag set for trace type %d", | ||
3240 | iter->ent->type); | ||
3232 | } | 3241 | } |
3233 | trace_access_unlock(iter->cpu_file); | 3242 | trace_access_unlock(iter->cpu_file); |
3234 | trace_event_read_unlock(); | 3243 | trace_event_read_unlock(); |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 456be9063c2d..cf535ccedc86 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); | |||
830 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, | 830 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, |
831 | struct trace_event *event) | 831 | struct trace_event *event) |
832 | { | 832 | { |
833 | if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) | ||
834 | return TRACE_TYPE_PARTIAL_LINE; | ||
835 | |||
833 | return TRACE_TYPE_HANDLED; | 836 | return TRACE_TYPE_HANDLED; |
834 | } | 837 | } |
835 | 838 | ||
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 2547d8813cf0..dff763b7baf1 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex); | |||
32 | 32 | ||
33 | struct trace_bprintk_fmt { | 33 | struct trace_bprintk_fmt { |
34 | struct list_head list; | 34 | struct list_head list; |
35 | char fmt[0]; | 35 | const char *fmt; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) | 38 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) |
@@ -49,6 +49,7 @@ static | |||
49 | void hold_module_trace_bprintk_format(const char **start, const char **end) | 49 | void hold_module_trace_bprintk_format(const char **start, const char **end) |
50 | { | 50 | { |
51 | const char **iter; | 51 | const char **iter; |
52 | char *fmt; | ||
52 | 53 | ||
53 | mutex_lock(&btrace_mutex); | 54 | mutex_lock(&btrace_mutex); |
54 | for (iter = start; iter < end; iter++) { | 55 | for (iter = start; iter < end; iter++) { |
@@ -58,14 +59,18 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) | |||
58 | continue; | 59 | continue; |
59 | } | 60 | } |
60 | 61 | ||
61 | tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt) | 62 | tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL); |
62 | + strlen(*iter) + 1, GFP_KERNEL); | 63 | if (tb_fmt) |
63 | if (tb_fmt) { | 64 | fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); |
65 | if (tb_fmt && fmt) { | ||
64 | list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); | 66 | list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); |
65 | strcpy(tb_fmt->fmt, *iter); | 67 | strcpy(fmt, *iter); |
68 | tb_fmt->fmt = fmt; | ||
66 | *iter = tb_fmt->fmt; | 69 | *iter = tb_fmt->fmt; |
67 | } else | 70 | } else { |
71 | kfree(tb_fmt); | ||
68 | *iter = NULL; | 72 | *iter = NULL; |
73 | } | ||
69 | } | 74 | } |
70 | mutex_unlock(&btrace_mutex); | 75 | mutex_unlock(&btrace_mutex); |
71 | } | 76 | } |
@@ -84,6 +89,76 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, | |||
84 | return 0; | 89 | return 0; |
85 | } | 90 | } |
86 | 91 | ||
92 | /* | ||
93 | * The debugfs/tracing/printk_formats file maps the addresses with | ||
94 | * the ASCII formats that are used in the bprintk events in the | ||
95 | * buffer. For userspace tools to be able to decode the events from | ||
96 | * the buffer, they need to be able to map the address with the format. | ||
97 | * | ||
98 | * The addresses of the bprintk formats are in their own section | ||
99 | * __trace_printk_fmt. But for modules we copy them into a link list. | ||
100 | * The code to print the formats and their addresses passes around the | ||
101 | * address of the fmt string. If the fmt address passed into the seq | ||
102 | * functions is within the kernel core __trace_printk_fmt section, then | ||
103 | * it simply uses the next pointer in the list. | ||
104 | * | ||
105 | * When the fmt pointer is outside the kernel core __trace_printk_fmt | ||
106 | * section, then we need to read the link list pointers. The trick is | ||
107 | * we pass the address of the string to the seq function just like | ||
108 | * we do for the kernel core formats. To get back the structure that | ||
109 | * holds the format, we simply use containerof() and then go to the | ||
110 | * next format in the list. | ||
111 | */ | ||
112 | static const char ** | ||
113 | find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) | ||
114 | { | ||
115 | struct trace_bprintk_fmt *mod_fmt; | ||
116 | |||
117 | if (list_empty(&trace_bprintk_fmt_list)) | ||
118 | return NULL; | ||
119 | |||
120 | /* | ||
121 | * v will point to the address of the fmt record from t_next | ||
122 | * v will be NULL from t_start. | ||
123 | * If this is the first pointer or called from start | ||
124 | * then we need to walk the list. | ||
125 | */ | ||
126 | if (!v || start_index == *pos) { | ||
127 | struct trace_bprintk_fmt *p; | ||
128 | |||
129 | /* search the module list */ | ||
130 | list_for_each_entry(p, &trace_bprintk_fmt_list, list) { | ||
131 | if (start_index == *pos) | ||
132 | return &p->fmt; | ||
133 | start_index++; | ||
134 | } | ||
135 | /* pos > index */ | ||
136 | return NULL; | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * v points to the address of the fmt field in the mod list | ||
141 | * structure that holds the module print format. | ||
142 | */ | ||
143 | mod_fmt = container_of(v, typeof(*mod_fmt), fmt); | ||
144 | if (mod_fmt->list.next == &trace_bprintk_fmt_list) | ||
145 | return NULL; | ||
146 | |||
147 | mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list); | ||
148 | |||
149 | return &mod_fmt->fmt; | ||
150 | } | ||
151 | |||
152 | static void format_mod_start(void) | ||
153 | { | ||
154 | mutex_lock(&btrace_mutex); | ||
155 | } | ||
156 | |||
157 | static void format_mod_stop(void) | ||
158 | { | ||
159 | mutex_unlock(&btrace_mutex); | ||
160 | } | ||
161 | |||
87 | #else /* !CONFIG_MODULES */ | 162 | #else /* !CONFIG_MODULES */ |
88 | __init static int | 163 | __init static int |
89 | module_trace_bprintk_format_notify(struct notifier_block *self, | 164 | module_trace_bprintk_format_notify(struct notifier_block *self, |
@@ -91,6 +166,13 @@ module_trace_bprintk_format_notify(struct notifier_block *self, | |||
91 | { | 166 | { |
92 | return 0; | 167 | return 0; |
93 | } | 168 | } |
169 | static inline const char ** | ||
170 | find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) | ||
171 | { | ||
172 | return NULL; | ||
173 | } | ||
174 | static inline void format_mod_start(void) { } | ||
175 | static inline void format_mod_stop(void) { } | ||
94 | #endif /* CONFIG_MODULES */ | 176 | #endif /* CONFIG_MODULES */ |
95 | 177 | ||
96 | 178 | ||
@@ -153,20 +235,33 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) | |||
153 | } | 235 | } |
154 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); | 236 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); |
155 | 237 | ||
238 | static const char **find_next(void *v, loff_t *pos) | ||
239 | { | ||
240 | const char **fmt = v; | ||
241 | int start_index; | ||
242 | |||
243 | if (!fmt) | ||
244 | fmt = __start___trace_bprintk_fmt + *pos; | ||
245 | |||
246 | start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; | ||
247 | |||
248 | if (*pos < start_index) | ||
249 | return fmt; | ||
250 | |||
251 | return find_next_mod_format(start_index, v, fmt, pos); | ||
252 | } | ||
253 | |||
156 | static void * | 254 | static void * |
157 | t_start(struct seq_file *m, loff_t *pos) | 255 | t_start(struct seq_file *m, loff_t *pos) |
158 | { | 256 | { |
159 | const char **fmt = __start___trace_bprintk_fmt + *pos; | 257 | format_mod_start(); |
160 | 258 | return find_next(NULL, pos); | |
161 | if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) | ||
162 | return NULL; | ||
163 | return fmt; | ||
164 | } | 259 | } |
165 | 260 | ||
166 | static void *t_next(struct seq_file *m, void * v, loff_t *pos) | 261 | static void *t_next(struct seq_file *m, void * v, loff_t *pos) |
167 | { | 262 | { |
168 | (*pos)++; | 263 | (*pos)++; |
169 | return t_start(m, pos); | 264 | return find_next(v, pos); |
170 | } | 265 | } |
171 | 266 | ||
172 | static int t_show(struct seq_file *m, void *v) | 267 | static int t_show(struct seq_file *m, void *v) |
@@ -205,6 +300,7 @@ static int t_show(struct seq_file *m, void *v) | |||
205 | 300 | ||
206 | static void t_stop(struct seq_file *m, void *p) | 301 | static void t_stop(struct seq_file *m, void *p) |
207 | { | 302 | { |
303 | format_mod_stop(); | ||
208 | } | 304 | } |
209 | 305 | ||
210 | static const struct seq_operations show_format_seq_ops = { | 306 | static const struct seq_operations show_format_seq_ops = { |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 68187af4889e..b219f1449c54 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
251 | { | 251 | { |
252 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | 252 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); |
253 | 253 | ||
254 | if (elem->regfunc && !elem->state && active) | 254 | if (elem->regfunc && !jump_label_enabled(&elem->key) && active) |
255 | elem->regfunc(); | 255 | elem->regfunc(); |
256 | else if (elem->unregfunc && elem->state && !active) | 256 | else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) |
257 | elem->unregfunc(); | 257 | elem->unregfunc(); |
258 | 258 | ||
259 | /* | 259 | /* |
@@ -264,13 +264,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
264 | * is used. | 264 | * is used. |
265 | */ | 265 | */ |
266 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 266 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); |
267 | if (!elem->state && active) { | 267 | if (active && !jump_label_enabled(&elem->key)) |
268 | jump_label_enable(&elem->state); | 268 | jump_label_inc(&elem->key); |
269 | elem->state = active; | 269 | else if (!active && jump_label_enabled(&elem->key)) |
270 | } else if (elem->state && !active) { | 270 | jump_label_dec(&elem->key); |
271 | jump_label_disable(&elem->state); | ||
272 | elem->state = active; | ||
273 | } | ||
274 | } | 271 | } |
275 | 272 | ||
276 | /* | 273 | /* |
@@ -281,13 +278,11 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
281 | */ | 278 | */ |
282 | static void disable_tracepoint(struct tracepoint *elem) | 279 | static void disable_tracepoint(struct tracepoint *elem) |
283 | { | 280 | { |
284 | if (elem->unregfunc && elem->state) | 281 | if (elem->unregfunc && jump_label_enabled(&elem->key)) |
285 | elem->unregfunc(); | 282 | elem->unregfunc(); |
286 | 283 | ||
287 | if (elem->state) { | 284 | if (jump_label_enabled(&elem->key)) |
288 | jump_label_disable(&elem->state); | 285 | jump_label_dec(&elem->key); |
289 | elem->state = 0; | ||
290 | } | ||
291 | rcu_assign_pointer(elem->funcs, NULL); | 286 | rcu_assign_pointer(elem->funcs, NULL); |
292 | } | 287 | } |
293 | 288 | ||