author     Ingo Molnar <mingo@elte.hu>   2010-09-24 03:12:05 -0400
committer  Ingo Molnar <mingo@elte.hu>   2010-09-24 03:12:05 -0400
commit     a5a2bad55de89a0adf7d6f783cb87ab7eb1a894f (patch)
tree       452cb8addc26f5c79fdd4e2fea92c78dc17fc8e8
parent     d0303d71c2fb9bcb90a8d48e6462c78c86f70ce6 (diff)
parent     46eb3b64dddd20f44e76b08676fa642dd374bf1d (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
-rw-r--r--  Makefile                              |   5
-rw-r--r--  arch/Kconfig                          |   3
-rw-r--r--  arch/sparc/Kconfig                    |   1
-rw-r--r--  arch/sparc/include/asm/jump_label.h   |  32
-rw-r--r--  arch/sparc/kernel/Makefile            |   2
-rw-r--r--  arch/sparc/kernel/jump_label.c        |  47
-rw-r--r--  arch/sparc/kernel/module.c            |   6
-rw-r--r--  arch/x86/Kconfig                      |   1
-rw-r--r--  arch/x86/include/asm/alternative.h    |  11
-rw-r--r--  arch/x86/include/asm/jump_label.h     |  37
-rw-r--r--  arch/x86/kernel/Makefile              |   2
-rw-r--r--  arch/x86/kernel/alternative.c         |  68
-rw-r--r--  arch/x86/kernel/ftrace.c              |  63
-rw-r--r--  arch/x86/kernel/jump_label.c          |  50
-rw-r--r--  arch/x86/kernel/kprobes.c             |   3
-rw-r--r--  arch/x86/kernel/module.c              |   3
-rw-r--r--  arch/x86/kernel/setup.c               |   6
-rw-r--r--  include/asm-generic/vmlinux.lds.h     |  10
-rw-r--r--  include/linux/dynamic_debug.h         |  39
-rw-r--r--  include/linux/jump_label.h            |  64
-rw-r--r--  include/linux/module.h                |   5
-rw-r--r--  include/linux/tracepoint.h            |   5
-rw-r--r--  kernel/Makefile                       |   2
-rw-r--r--  kernel/jump_label.c                   | 429
-rw-r--r--  kernel/kprobes.c                      |   4
-rw-r--r--  kernel/module.c                       |   6
-rw-r--r--  kernel/trace/trace_workqueue.c        |  10
-rw-r--r--  kernel/tracepoint.c                   |  14
-rw-r--r--  lib/dynamic_debug.c                   |  42
-rw-r--r--  scripts/Makefile.lib                  |  11
-rw-r--r--  scripts/basic/Makefile                |   2
-rw-r--r--  scripts/basic/hash.c                  |  64
-rw-r--r--  scripts/gcc-goto.sh                   |   5
33 files changed, 843 insertions(+), 209 deletions(-)
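
In short, the merge adds the asm-goto based jump label machinery: a build-time CC_HAVE_ASM_GOTO probe, generic JUMP_LABEL()/enable_jump_label()/disable_jump_label() interfaces, x86 and sparc64 patching backends, module support, and two converted users (tracepoints and dynamic debug). A minimal sketch of the intended call-site pattern, assuming a hypothetical key variable and slow path that are not part of this merge:

	#include <linux/jump_label.h>

	static int foo_enabled;			/* the key; sites are found via its address */

	static void foo_slow_path(int arg);	/* hypothetical out-of-line work */

	static inline void maybe_foo(int arg)
	{
		JUMP_LABEL(&foo_enabled, do_foo);	/* a 5-byte nop while disabled */
		return;
	do_foo:
		foo_slow_path(arg);
	}

	static void set_foo(int on)		/* run-time switch, e.g. from a sysctl */
	{
		if (on)
			enable_jump_label(&foo_enabled);
		else
			disable_jump_label(&foo_enabled);
	}

With CC_HAVE_ASM_GOTO and HAVE_ARCH_JUMP_LABEL both present, the straight-line path is a single patched nop; otherwise include/linux/jump_label.h falls back to the usual unlikely(*key) test.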
diff --git a/Makefile b/Makefile
index 3133a5772eeb..865126614a33 100644
--- a/Makefile
+++ b/Makefile
@@ -591,6 +591,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
591# conserve stack if available 591# conserve stack if available
592KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) 592KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
593 593
594# check for 'asm goto'
595ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
596 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
597endif
598
594# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments 599# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
595# But warn user when we do so 600# But warn user when we do so
596warn-assign = \ 601warn-assign = \
diff --git a/arch/Kconfig b/arch/Kconfig
index fe48fc7a3eba..53d7f619a1b9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI
158 subsystem. Also has support for calculating CPU cycle events 158 subsystem. Also has support for calculating CPU cycle events
159 to determine how many clock cycles in a given period. 159 to determine how many clock cycles in a given period.
160 160
161config HAVE_ARCH_JUMP_LABEL
162 bool
163
161source "kernel/gcov/Kconfig" 164source "kernel/gcov/Kconfig"
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 491e9d6de191..9212cd42a832 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -30,6 +30,7 @@ config SPARC
30 select PERF_USE_VMALLOC 30 select PERF_USE_VMALLOC
31 select HAVE_DMA_ATTRS 31 select HAVE_DMA_ATTRS
32 select HAVE_DMA_API_DEBUG 32 select HAVE_DMA_API_DEBUG
33 select HAVE_ARCH_JUMP_LABEL
33 34
34config SPARC32 35config SPARC32
35 def_bool !64BIT 36 def_bool !64BIT
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
new file mode 100644
index 000000000000..62e66d7b2fb6
--- /dev/null
+++ b/arch/sparc/include/asm/jump_label.h
@@ -0,0 +1,32 @@
1#ifndef _ASM_SPARC_JUMP_LABEL_H
2#define _ASM_SPARC_JUMP_LABEL_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7#include <asm/system.h>
8
9#define JUMP_LABEL_NOP_SIZE 4
10
11#define JUMP_LABEL(key, label) \
12 do { \
13 asm goto("1:\n\t" \
14 "nop\n\t" \
15 "nop\n\t" \
16 ".pushsection __jump_table, \"a\"\n\t"\
17 ".word 1b, %l[" #label "], %c0\n\t" \
18 ".popsection \n\t" \
19 : : "i" (key) : : label);\
20 } while (0)
21
22#endif /* __KERNEL__ */
23
24typedef u32 jump_label_t;
25
26struct jump_entry {
27 jump_label_t code;
28 jump_label_t target;
29 jump_label_t key;
30};
31
32#endif
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 0c2dc1f24a9a..599398fbbc7c 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
119 119
120pc--$(CONFIG_PERF_EVENTS) := perf_event.o 120pc--$(CONFIG_PERF_EVENTS) := perf_event.o
121obj-$(CONFIG_SPARC64) += $(pc--y) 121obj-$(CONFIG_SPARC64) += $(pc--y)
122
123obj-$(CONFIG_SPARC64) += jump_label.o
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644
index 000000000000..ea2dafc93d78
--- /dev/null
+++ b/arch/sparc/kernel/jump_label.c
@@ -0,0 +1,47 @@
1#include <linux/kernel.h>
2#include <linux/types.h>
3#include <linux/mutex.h>
4#include <linux/cpu.h>
5
6#include <linux/jump_label.h>
7#include <linux/memory.h>
8
9#ifdef HAVE_JUMP_LABEL
10
11void arch_jump_label_transform(struct jump_entry *entry,
12 enum jump_label_type type)
13{
14 u32 val;
15 u32 *insn = (u32 *) (unsigned long) entry->code;
16
17 if (type == JUMP_LABEL_ENABLE) {
18 s32 off = (s32)entry->target - (s32)entry->code;
19
20#ifdef CONFIG_SPARC64
21 /* ba,pt %xcc, . + (off << 2) */
22 val = 0x10680000 | ((u32) off >> 2);
23#else
24 /* ba . + (off << 2) */
25 val = 0x10800000 | ((u32) off >> 2);
26#endif
27 } else {
28 val = 0x01000000;
29 }
30
31 get_online_cpus();
32 mutex_lock(&text_mutex);
33 *insn = val;
34 flushi(insn);
35 mutex_unlock(&text_mutex);
36 put_online_cpus();
37}
38
39void arch_jump_label_text_poke_early(jump_label_t addr)
40{
41 u32 *insn_p = (u32 *) (unsigned long) addr;
42
43 *insn_p = 0x01000000;
44 flushi(insn_p);
45}
46
47#endif
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index f848aadf54dc..ee3c7dde8d9f 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -18,6 +18,9 @@
18#include <asm/spitfire.h> 18#include <asm/spitfire.h>
19 19
20#ifdef CONFIG_SPARC64 20#ifdef CONFIG_SPARC64
21
22#include <linux/jump_label.h>
23
21static void *module_map(unsigned long size) 24static void *module_map(unsigned long size)
22{ 25{
23 struct vm_struct *area; 26 struct vm_struct *area;
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr,
227 const Elf_Shdr *sechdrs, 230 const Elf_Shdr *sechdrs,
228 struct module *me) 231 struct module *me)
229{ 232{
233 /* make jump label nops */
234 jump_label_apply_nops(me);
235
230 /* Cheetah's I-cache is fully coherent. */ 236 /* Cheetah's I-cache is fully coherent. */
231 if (tlb_type == spitfire) { 237 if (tlb_type == spitfire) {
232 unsigned long va; 238 unsigned long va;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cea0cd9a316f..b431a0824a93 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -59,6 +59,7 @@ config X86
59 select ANON_INODES 59 select ANON_INODES
60 select HAVE_ARCH_KMEMCHECK 60 select HAVE_ARCH_KMEMCHECK
61 select HAVE_USER_RETURN_NOTIFIER 61 select HAVE_USER_RETURN_NOTIFIER
62 select HAVE_ARCH_JUMP_LABEL
62 63
63config INSTRUCTION_DECODER 64config INSTRUCTION_DECODER
64 def_bool (KPROBES || PERF_EVENTS) 65 def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index bc6abb7bc7ee..76561d20ea2f 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -4,6 +4,7 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/stringify.h> 6#include <linux/stringify.h>
7#include <linux/jump_label.h>
7#include <asm/asm.h> 8#include <asm/asm.h>
8 9
9/* 10/*
@@ -160,6 +161,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
160#define __parainstructions_end NULL 161#define __parainstructions_end NULL
161#endif 162#endif
162 163
164extern void *text_poke_early(void *addr, const void *opcode, size_t len);
165
163/* 166/*
164 * Clear and restore the kernel write-protection flag on the local CPU. 167 * Clear and restore the kernel write-protection flag on the local CPU.
165 * Allows the kernel to edit read-only pages. 168 * Allows the kernel to edit read-only pages.
@@ -180,4 +183,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
180extern void *text_poke(void *addr, const void *opcode, size_t len); 183extern void *text_poke(void *addr, const void *opcode, size_t len);
181extern void *text_poke_smp(void *addr, const void *opcode, size_t len); 184extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
182 185
186#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
187#define IDEAL_NOP_SIZE_5 5
188extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
189extern void arch_init_ideal_nop5(void);
190#else
191static inline void arch_init_ideal_nop5(void) {}
192#endif
193
183#endif /* _ASM_X86_ALTERNATIVE_H */ 194#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
new file mode 100644
index 000000000000..f52d42e80585
--- /dev/null
+++ b/arch/x86/include/asm/jump_label.h
@@ -0,0 +1,37 @@
1#ifndef _ASM_X86_JUMP_LABEL_H
2#define _ASM_X86_JUMP_LABEL_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7#include <asm/nops.h>
8
9#define JUMP_LABEL_NOP_SIZE 5
10
11# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
12
13# define JUMP_LABEL(key, label) \
14 do { \
15 asm goto("1:" \
16 JUMP_LABEL_INITIAL_NOP \
17 ".pushsection __jump_table, \"a\" \n\t"\
18 _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
19 ".popsection \n\t" \
20 : : "i" (key) : : label); \
21 } while (0)
22
23#endif /* __KERNEL__ */
24
25#ifdef CONFIG_X86_64
26typedef u64 jump_label_t;
27#else
28typedef u32 jump_label_t;
29#endif
30
31struct jump_entry {
32 jump_label_t code;
33 jump_label_t target;
34 jump_label_t key;
35};
36
37#endif
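
For orientation, a hedged sketch of how generic code can read one of the records the macro above emits into the __jump_table section (the helper name is invented; the field meanings follow from the .pushsection block and the struct definition):

	/* sketch only: what one struct jump_entry describes on x86 */
	static void describe_jump_entry(const struct jump_entry *entry)
	{
		void *site   = (void *)(unsigned long)entry->code;	/* the "1:" label, 5 bytes wide */
		void *target = (void *)(unsigned long)entry->target;	/* the C label named in JUMP_LABEL() */
		void *key    = (void *)(unsigned long)entry->key;	/* the "i" operand: &controlling variable */

		pr_debug("jump label site %p -> %p, keyed on %p\n", site, target, key);
	}

kernel/jump_label.c, further down in this diff, hashes these records by ->key so that a single enable/disable call can patch every site sharing one variable.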
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0925676266bd..24fa1718ddb9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -32,7 +32,7 @@ GCOV_PROFILE_paravirt.o := n
32obj-y := process_$(BITS).o signal.o entry_$(BITS).o 32obj-y := process_$(BITS).o signal.o entry_$(BITS).o
33obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o 33obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
34obj-y += time.o ioport.o ldt.o dumpstack.o 34obj-y += time.o ioport.o ldt.o dumpstack.o
35obj-y += setup.o x86_init.o i8259.o irqinit.o 35obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
36obj-$(CONFIG_X86_VISWS) += visws_quirks.o 36obj-$(CONFIG_X86_VISWS) += visws_quirks.o
37obj-$(CONFIG_X86_32) += probe_roms_32.o 37obj-$(CONFIG_X86_32) += probe_roms_32.o
38obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o 38obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f65ab8b014c4..cb0e6d385f6d 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -195,7 +195,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
195 195
196extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; 196extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
197extern s32 __smp_locks[], __smp_locks_end[]; 197extern s32 __smp_locks[], __smp_locks_end[];
198static void *text_poke_early(void *addr, const void *opcode, size_t len); 198void *text_poke_early(void *addr, const void *opcode, size_t len);
199 199
200/* Replace instructions with better alternatives for this CPU type. 200/* Replace instructions with better alternatives for this CPU type.
201 This runs before SMP is initialized to avoid SMP problems with 201 This runs before SMP is initialized to avoid SMP problems with
@@ -522,7 +522,7 @@ void __init alternative_instructions(void)
522 * instructions. And on the local CPU you need to be protected again NMI or MCE 522 * instructions. And on the local CPU you need to be protected again NMI or MCE
523 * handlers seeing an inconsistent instruction while you patch. 523 * handlers seeing an inconsistent instruction while you patch.
524 */ 524 */
525static void *__init_or_module text_poke_early(void *addr, const void *opcode, 525void *__init_or_module text_poke_early(void *addr, const void *opcode,
526 size_t len) 526 size_t len)
527{ 527{
528 unsigned long flags; 528 unsigned long flags;
@@ -641,3 +641,67 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
641 return addr; 641 return addr;
642} 642}
643 643
644#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
645
646unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
647
648void __init arch_init_ideal_nop5(void)
649{
650 extern const unsigned char ftrace_test_p6nop[];
651 extern const unsigned char ftrace_test_nop5[];
652 extern const unsigned char ftrace_test_jmp[];
653 int faulted = 0;
654
655 /*
656 * There is no good nop for all x86 archs.
657 * We will default to using the P6_NOP5, but first we
658 * will test to make sure that the nop will actually
659 * work on this CPU. If it faults, we will then
660 * go to a lesser efficient 5 byte nop. If that fails
661 * we then just use a jmp as our nop. This isn't the most
662 * efficient nop, but we can not use a multi part nop
663 * since we would then risk being preempted in the middle
664 * of that nop, and if we enabled tracing then, it might
665 * cause a system crash.
666 *
667 * TODO: check the cpuid to determine the best nop.
668 */
669 asm volatile (
670 "ftrace_test_jmp:"
671 "jmp ftrace_test_p6nop\n"
672 "nop\n"
673 "nop\n"
674 "nop\n" /* 2 byte jmp + 3 bytes */
675 "ftrace_test_p6nop:"
676 P6_NOP5
677 "jmp 1f\n"
678 "ftrace_test_nop5:"
679 ".byte 0x66,0x66,0x66,0x66,0x90\n"
680 "1:"
681 ".section .fixup, \"ax\"\n"
682 "2: movl $1, %0\n"
683 " jmp ftrace_test_nop5\n"
684 "3: movl $2, %0\n"
685 " jmp 1b\n"
686 ".previous\n"
687 _ASM_EXTABLE(ftrace_test_p6nop, 2b)
688 _ASM_EXTABLE(ftrace_test_nop5, 3b)
689 : "=r"(faulted) : "0" (faulted));
690
691 switch (faulted) {
692 case 0:
693 pr_info("converting mcount calls to 0f 1f 44 00 00\n");
694 memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
695 break;
696 case 1:
697 pr_info("converting mcount calls to 66 66 66 66 90\n");
698 memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
699 break;
700 case 2:
701 pr_info("converting mcount calls to jmp . + 5\n");
702 memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
703 break;
704 }
705
706}
707#endif
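
For reference, the three 5-byte nop candidates that arch_init_ideal_nop5() probes, written out as byte arrays (a sketch; the encodings are taken from the pr_info() strings above):

	static const unsigned char p6_nop5[]       = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* nopl 0x0(%eax,%eax,1) */
	static const unsigned char prefixed_nop5[] = { 0x66, 0x66, 0x66, 0x66, 0x90 };	/* 0x66 prefixes + nop */
	static const unsigned char jmp_nop5[]      = { 0xe9, 0x00, 0x00, 0x00, 0x00 };	/* jmp . + 5 */

A single 5-byte instruction is required rather than five 1-byte nops so that an interrupt or preemption can never leave a CPU executing half of a partially patched site, as the comment above explains.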
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cd37469b54ee..3afb33f14d2d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
257 return mod_code_status; 257 return mod_code_status;
258} 258}
259 259
260
261
262
263static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
264
265static unsigned char *ftrace_nop_replace(void) 260static unsigned char *ftrace_nop_replace(void)
266{ 261{
267 return ftrace_nop; 262 return ideal_nop5;
268} 263}
269 264
270static int 265static int
@@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
338 333
339int __init ftrace_dyn_arch_init(void *data) 334int __init ftrace_dyn_arch_init(void *data)
340{ 335{
341 extern const unsigned char ftrace_test_p6nop[];
342 extern const unsigned char ftrace_test_nop5[];
343 extern const unsigned char ftrace_test_jmp[];
344 int faulted = 0;
345
346 /*
347 * There is no good nop for all x86 archs.
348 * We will default to using the P6_NOP5, but first we
349 * will test to make sure that the nop will actually
350 * work on this CPU. If it faults, we will then
351 * go to a lesser efficient 5 byte nop. If that fails
352 * we then just use a jmp as our nop. This isn't the most
353 * efficient nop, but we can not use a multi part nop
354 * since we would then risk being preempted in the middle
355 * of that nop, and if we enabled tracing then, it might
356 * cause a system crash.
357 *
358 * TODO: check the cpuid to determine the best nop.
359 */
360 asm volatile (
361 "ftrace_test_jmp:"
362 "jmp ftrace_test_p6nop\n"
363 "nop\n"
364 "nop\n"
365 "nop\n" /* 2 byte jmp + 3 bytes */
366 "ftrace_test_p6nop:"
367 P6_NOP5
368 "jmp 1f\n"
369 "ftrace_test_nop5:"
370 ".byte 0x66,0x66,0x66,0x66,0x90\n"
371 "1:"
372 ".section .fixup, \"ax\"\n"
373 "2: movl $1, %0\n"
374 " jmp ftrace_test_nop5\n"
375 "3: movl $2, %0\n"
376 " jmp 1b\n"
377 ".previous\n"
378 _ASM_EXTABLE(ftrace_test_p6nop, 2b)
379 _ASM_EXTABLE(ftrace_test_nop5, 3b)
380 : "=r"(faulted) : "0" (faulted));
381
382 switch (faulted) {
383 case 0:
384 pr_info("converting mcount calls to 0f 1f 44 00 00\n");
385 memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
386 break;
387 case 1:
388 pr_info("converting mcount calls to 66 66 66 66 90\n");
389 memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
390 break;
391 case 2:
392 pr_info("converting mcount calls to jmp . + 5\n");
393 memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
394 break;
395 }
396
397 /* The return code is retured via data */ 336 /* The return code is retured via data */
398 *(unsigned long *)data = 0; 337 *(unsigned long *)data = 0;
399 338
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
new file mode 100644
index 000000000000..961b6b30ba90
--- /dev/null
+++ b/arch/x86/kernel/jump_label.c
@@ -0,0 +1,50 @@
1/*
2 * jump label x86 support
3 *
4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
5 *
6 */
7#include <linux/jump_label.h>
8#include <linux/memory.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/list.h>
12#include <linux/jhash.h>
13#include <linux/cpu.h>
14#include <asm/kprobes.h>
15#include <asm/alternative.h>
16
17#ifdef HAVE_JUMP_LABEL
18
19union jump_code_union {
20 char code[JUMP_LABEL_NOP_SIZE];
21 struct {
22 char jump;
23 int offset;
24 } __attribute__((packed));
25};
26
27void arch_jump_label_transform(struct jump_entry *entry,
28 enum jump_label_type type)
29{
30 union jump_code_union code;
31
32 if (type == JUMP_LABEL_ENABLE) {
33 code.jump = 0xe9;
34 code.offset = entry->target -
35 (entry->code + JUMP_LABEL_NOP_SIZE);
36 } else
37 memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
38 get_online_cpus();
39 mutex_lock(&text_mutex);
40 text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
41 mutex_unlock(&text_mutex);
42 put_online_cpus();
43}
44
45void arch_jump_label_text_poke_early(jump_label_t addr)
46{
47 text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
48}
49
50#endif
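
The enable path above builds an "e9 rel32" jump whose displacement is measured from the end of the 5-byte site. A stand-alone sketch of that arithmetic with made-up addresses (compiles with any host C compiler; not kernel code):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint64_t site   = 0xffffffff81000100ull;	/* hypothetical entry->code */
		uint64_t target = 0xffffffff81000140ull;	/* hypothetical entry->target */
		unsigned char insn[5];
		int32_t rel = (int32_t)(target - (site + 5));	/* code.offset above */

		insn[0] = 0xe9;					/* code.jump above */
		memcpy(&insn[1], &rel, sizeof(rel));
		printf("enable:  e9 %02x %02x %02x %02x\n",
		       insn[1], insn[2], insn[3], insn[4]);
		printf("disable: copy the 5-byte ideal_nop5 over the site\n");
		return 0;
	}

Both the jump and the nop are written through text_poke_smp() with text_mutex held and CPU hotplug blocked, exactly as arch_jump_label_transform() shows.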
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index e05952af5d26..1cbd54c0df99 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1218,7 +1218,8 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
1218 } 1218 }
1219 /* Check whether the address range is reserved */ 1219 /* Check whether the address range is reserved */
1220 if (ftrace_text_reserved(src, src + len - 1) || 1220 if (ftrace_text_reserved(src, src + len - 1) ||
1221 alternatives_text_reserved(src, src + len - 1)) 1221 alternatives_text_reserved(src, src + len - 1) ||
1222 jump_label_text_reserved(src, src + len - 1))
1222 return -EBUSY; 1223 return -EBUSY;
1223 1224
1224 return len; 1225 return len;
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index e0bc186d7501..5399f58de7ed 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -239,6 +239,9 @@ int module_finalize(const Elf_Ehdr *hdr,
239 apply_paravirt(pseg, pseg + para->sh_size); 239 apply_paravirt(pseg, pseg + para->sh_size);
240 } 240 }
241 241
242 /* make jump label nops */
243 jump_label_apply_nops(me);
244
242 return module_bug_finalize(hdr, sechdrs, me); 245 return module_bug_finalize(hdr, sechdrs, me);
243} 246}
244 247
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c3a4fbb2b996..00e167870f71 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -112,6 +112,7 @@
112#include <asm/numa_64.h> 112#include <asm/numa_64.h>
113#endif 113#endif
114#include <asm/mce.h> 114#include <asm/mce.h>
115#include <asm/alternative.h>
115 116
116/* 117/*
117 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. 118 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -726,6 +727,7 @@ void __init setup_arch(char **cmdline_p)
726{ 727{
727 int acpi = 0; 728 int acpi = 0;
728 int k8 = 0; 729 int k8 = 0;
730 unsigned long flags;
729 731
730#ifdef CONFIG_X86_32 732#ifdef CONFIG_X86_32
731 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); 733 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
@@ -1071,6 +1073,10 @@ void __init setup_arch(char **cmdline_p)
1071 x86_init.oem.banner(); 1073 x86_init.oem.banner();
1072 1074
1073 mcheck_init(); 1075 mcheck_init();
1076
1077 local_irq_save(flags);
1078 arch_init_ideal_nop5();
1079 local_irq_restore(flags);
1074} 1080}
1075 1081
1076#ifdef CONFIG_X86_32 1082#ifdef CONFIG_X86_32
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8a92a170fb7d..ef2af9948eac 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -220,6 +220,8 @@
220 \ 220 \
221 BUG_TABLE \ 221 BUG_TABLE \
222 \ 222 \
223 JUMP_TABLE \
224 \
223 /* PCI quirks */ \ 225 /* PCI quirks */ \
224 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ 226 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
225 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ 227 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -563,6 +565,14 @@
563#define BUG_TABLE 565#define BUG_TABLE
564#endif 566#endif
565 567
568#define JUMP_TABLE \
569 . = ALIGN(8); \
570 __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
571 VMLINUX_SYMBOL(__start___jump_table) = .; \
572 *(__jump_table) \
573 VMLINUX_SYMBOL(__stop___jump_table) = .; \
574 }
575
566#ifdef CONFIG_PM_TRACE 576#ifdef CONFIG_PM_TRACE
567#define TRACEDATA \ 577#define TRACEDATA \
568 . = ALIGN(4); \ 578 . = ALIGN(4); \
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 52c0da4bdd18..bef3cda44c4c 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,6 +1,8 @@
1#ifndef _DYNAMIC_DEBUG_H 1#ifndef _DYNAMIC_DEBUG_H
2#define _DYNAMIC_DEBUG_H 2#define _DYNAMIC_DEBUG_H
3 3
4#include <linux/jump_label.h>
5
4/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which 6/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
5 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They 7 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
6 * use independent hash functions, to reduce the chance of false positives. 8 * use independent hash functions, to reduce the chance of false positives.
@@ -22,8 +24,6 @@ struct _ddebug {
22 const char *function; 24 const char *function;
23 const char *filename; 25 const char *filename;
24 const char *format; 26 const char *format;
25 char primary_hash;
26 char secondary_hash;
27 unsigned int lineno:24; 27 unsigned int lineno:24;
28 /* 28 /*
29 * The flags field controls the behaviour at the callsite. 29 * The flags field controls the behaviour at the callsite.
@@ -33,6 +33,7 @@ struct _ddebug {
33#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ 33#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
34#define _DPRINTK_FLAGS_DEFAULT 0 34#define _DPRINTK_FLAGS_DEFAULT 0
35 unsigned int flags:8; 35 unsigned int flags:8;
36 char enabled;
36} __attribute__((aligned(8))); 37} __attribute__((aligned(8)));
37 38
38 39
@@ -42,33 +43,35 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
42#if defined(CONFIG_DYNAMIC_DEBUG) 43#if defined(CONFIG_DYNAMIC_DEBUG)
43extern int ddebug_remove_module(const char *mod_name); 44extern int ddebug_remove_module(const char *mod_name);
44 45
45#define __dynamic_dbg_enabled(dd) ({ \
46 int __ret = 0; \
47 if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \
48 (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \
49 if (unlikely(dd.flags)) \
50 __ret = 1; \
51 __ret; })
52
53#define dynamic_pr_debug(fmt, ...) do { \ 46#define dynamic_pr_debug(fmt, ...) do { \
47 __label__ do_printk; \
48 __label__ out; \
54 static struct _ddebug descriptor \ 49 static struct _ddebug descriptor \
55 __used \ 50 __used \
56 __attribute__((section("__verbose"), aligned(8))) = \ 51 __attribute__((section("__verbose"), aligned(8))) = \
57 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ 52 { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
58 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ 53 _DPRINTK_FLAGS_DEFAULT }; \
59 if (__dynamic_dbg_enabled(descriptor)) \ 54 JUMP_LABEL(&descriptor.enabled, do_printk); \
60 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ 55 goto out; \
56do_printk: \
57 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
58out: ; \
61 } while (0) 59 } while (0)
62 60
63 61
64#define dynamic_dev_dbg(dev, fmt, ...) do { \ 62#define dynamic_dev_dbg(dev, fmt, ...) do { \
63 __label__ do_printk; \
64 __label__ out; \
65 static struct _ddebug descriptor \ 65 static struct _ddebug descriptor \
66 __used \ 66 __used \
67 __attribute__((section("__verbose"), aligned(8))) = \ 67 __attribute__((section("__verbose"), aligned(8))) = \
68 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ 68 { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
69 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ 69 _DPRINTK_FLAGS_DEFAULT }; \
70 if (__dynamic_dbg_enabled(descriptor)) \ 70 JUMP_LABEL(&descriptor.enabled, do_printk); \
71 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ 71 goto out; \
72do_printk: \
73 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
74out: ; \
72 } while (0) 75 } while (0)
73 76
74#else 77#else
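
As a hedged illustration (the function and message are hypothetical), a call site does not change syntactically; only the generated fast path does:

	#include <linux/dynamic_debug.h>

	static void frob_widget(int id)
	{
		/*
		 * With CONFIG_DYNAMIC_DEBUG this expands to the JUMP_LABEL()-gated
		 * block above: a nop until this call site's descriptor.enabled is
		 * switched on.
		 */
		dynamic_pr_debug("frobbing widget %d\n", id);
	}

Run-time control still goes through the dynamic_debug control file, e.g. echo 'func frob_widget +p' > <debugfs>/dynamic_debug/control; ddebug_change() in lib/dynamic_debug.c, later in this diff, now calls enable_jump_label(&dp->enabled) instead of updating the old DEBUG_HASH bitmasks.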
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
new file mode 100644
index 000000000000..b72cd9f92c2e
--- /dev/null
+++ b/include/linux/jump_label.h
@@ -0,0 +1,64 @@
1#ifndef _LINUX_JUMP_LABEL_H
2#define _LINUX_JUMP_LABEL_H
3
4#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
5# include <asm/jump_label.h>
6# define HAVE_JUMP_LABEL
7#endif
8
9enum jump_label_type {
10 JUMP_LABEL_ENABLE,
11 JUMP_LABEL_DISABLE
12};
13
14struct module;
15
16#ifdef HAVE_JUMP_LABEL
17
18extern struct jump_entry __start___jump_table[];
19extern struct jump_entry __stop___jump_table[];
20
21extern void arch_jump_label_transform(struct jump_entry *entry,
22 enum jump_label_type type);
23extern void arch_jump_label_text_poke_early(jump_label_t addr);
24extern void jump_label_update(unsigned long key, enum jump_label_type type);
25extern void jump_label_apply_nops(struct module *mod);
26extern int jump_label_text_reserved(void *start, void *end);
27
28#define enable_jump_label(key) \
29 jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
30
31#define disable_jump_label(key) \
32 jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
33
34#else
35
36#define JUMP_LABEL(key, label) \
37do { \
38 if (unlikely(*key)) \
39 goto label; \
40} while (0)
41
42#define enable_jump_label(cond_var) \
43do { \
44 *(cond_var) = 1; \
45} while (0)
46
47#define disable_jump_label(cond_var) \
48do { \
49 *(cond_var) = 0; \
50} while (0)
51
52static inline int jump_label_apply_nops(struct module *mod)
53{
54 return 0;
55}
56
57static inline int jump_label_text_reserved(void *start, void *end)
58{
59 return 0;
60}
61
62#endif
63
64#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 8a6b9fdc7ffa..403ac26023ce 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -350,7 +350,10 @@ struct module
350 struct tracepoint *tracepoints; 350 struct tracepoint *tracepoints;
351 unsigned int num_tracepoints; 351 unsigned int num_tracepoints;
352#endif 352#endif
353 353#ifdef HAVE_JUMP_LABEL
354 struct jump_entry *jump_entries;
355 unsigned int num_jump_entries;
356#endif
354#ifdef CONFIG_TRACING 357#ifdef CONFIG_TRACING
355 const char **trace_bprintk_fmt_start; 358 const char **trace_bprintk_fmt_start;
356 unsigned int num_trace_bprintk_fmt; 359 unsigned int num_trace_bprintk_fmt;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 103d1b61aacb..a4a90b6726ce 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/rcupdate.h> 19#include <linux/rcupdate.h>
20#include <linux/jump_label.h>
20 21
21struct module; 22struct module;
22struct tracepoint; 23struct tracepoint;
@@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
145 extern struct tracepoint __tracepoint_##name; \ 146 extern struct tracepoint __tracepoint_##name; \
146 static inline void trace_##name(proto) \ 147 static inline void trace_##name(proto) \
147 { \ 148 { \
148 if (unlikely(__tracepoint_##name.state)) \ 149 JUMP_LABEL(&__tracepoint_##name.state, do_trace); \
150 return; \
151do_trace: \
149 __DO_TRACE(&__tracepoint_##name, \ 152 __DO_TRACE(&__tracepoint_##name, \
150 TP_PROTO(data_proto), \ 153 TP_PROTO(data_proto), \
151 TP_ARGS(data_args)); \ 154 TP_ARGS(data_args)); \
diff --git a/kernel/Makefile b/kernel/Makefile
index 0b72d1a74be0..d52b473c99a1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ 12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
13 async.o range.o 13 async.o range.o jump_label.o
14obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o 14obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
15obj-y += groups.o 15obj-y += groups.o
16 16
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
new file mode 100644
index 000000000000..7be868bf25c6
--- /dev/null
+++ b/kernel/jump_label.c
@@ -0,0 +1,429 @@
1/*
2 * jump label support
3 *
4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
5 *
6 */
7#include <linux/jump_label.h>
8#include <linux/memory.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/list.h>
12#include <linux/jhash.h>
13#include <linux/slab.h>
14#include <linux/sort.h>
15#include <linux/err.h>
16
17#ifdef HAVE_JUMP_LABEL
18
19#define JUMP_LABEL_HASH_BITS 6
20#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
21static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
22
23/* mutex to protect coming/going of the the jump_label table */
24static DEFINE_MUTEX(jump_label_mutex);
25
26struct jump_label_entry {
27 struct hlist_node hlist;
28 struct jump_entry *table;
29 int nr_entries;
30 /* hang modules off here */
31 struct hlist_head modules;
32 unsigned long key;
33};
34
35struct jump_label_module_entry {
36 struct hlist_node hlist;
37 struct jump_entry *table;
38 int nr_entries;
39 struct module *mod;
40};
41
42static int jump_label_cmp(const void *a, const void *b)
43{
44 const struct jump_entry *jea = a;
45 const struct jump_entry *jeb = b;
46
47 if (jea->key < jeb->key)
48 return -1;
49
50 if (jea->key > jeb->key)
51 return 1;
52
53 return 0;
54}
55
56static void
57sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
58{
59 unsigned long size;
60
61 size = (((unsigned long)stop - (unsigned long)start)
62 / sizeof(struct jump_entry));
63 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
64}
65
66static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
67{
68 struct hlist_head *head;
69 struct hlist_node *node;
70 struct jump_label_entry *e;
71 u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
72
73 head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
74 hlist_for_each_entry(e, node, head, hlist) {
75 if (key == e->key)
76 return e;
77 }
78 return NULL;
79}
80
81static struct jump_label_entry *
82add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
83{
84 struct hlist_head *head;
85 struct jump_label_entry *e;
86 u32 hash;
87
88 e = get_jump_label_entry(key);
89 if (e)
90 return ERR_PTR(-EEXIST);
91
92 e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
93 if (!e)
94 return ERR_PTR(-ENOMEM);
95
96 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
97 head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
98 e->key = key;
99 e->table = table;
100 e->nr_entries = nr_entries;
101 INIT_HLIST_HEAD(&(e->modules));
102 hlist_add_head(&e->hlist, head);
103 return e;
104}
105
106static int
107build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
108{
109 struct jump_entry *iter, *iter_begin;
110 struct jump_label_entry *entry;
111 int count;
112
113 sort_jump_label_entries(start, stop);
114 iter = start;
115 while (iter < stop) {
116 entry = get_jump_label_entry(iter->key);
117 if (!entry) {
118 iter_begin = iter;
119 count = 0;
120 while ((iter < stop) &&
121 (iter->key == iter_begin->key)) {
122 iter++;
123 count++;
124 }
125 entry = add_jump_label_entry(iter_begin->key,
126 count, iter_begin);
127 if (IS_ERR(entry))
128 return PTR_ERR(entry);
129 } else {
130 WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n");
131 return -1;
132 }
133 }
134 return 0;
135}
136
137/***
138 * jump_label_update - update jump label text
139 * @key - key value associated with a a jump label
140 * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
141 *
142 * Will enable/disable the jump for jump label @key, depending on the
143 * value of @type.
144 *
145 */
146
147void jump_label_update(unsigned long key, enum jump_label_type type)
148{
149 struct jump_entry *iter;
150 struct jump_label_entry *entry;
151 struct hlist_node *module_node;
152 struct jump_label_module_entry *e_module;
153 int count;
154
155 mutex_lock(&jump_label_mutex);
156 entry = get_jump_label_entry((jump_label_t)key);
157 if (entry) {
158 count = entry->nr_entries;
159 iter = entry->table;
160 while (count--) {
161 if (kernel_text_address(iter->code))
162 arch_jump_label_transform(iter, type);
163 iter++;
164 }
165 /* eanble/disable jump labels in modules */
166 hlist_for_each_entry(e_module, module_node, &(entry->modules),
167 hlist) {
168 count = e_module->nr_entries;
169 iter = e_module->table;
170 while (count--) {
171 if (kernel_text_address(iter->code))
172 arch_jump_label_transform(iter, type);
173 iter++;
174 }
175 }
176 }
177 mutex_unlock(&jump_label_mutex);
178}
179
180static int addr_conflict(struct jump_entry *entry, void *start, void *end)
181{
182 if (entry->code <= (unsigned long)end &&
183 entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
184 return 1;
185
186 return 0;
187}
188
189#ifdef CONFIG_MODULES
190
191static int module_conflict(void *start, void *end)
192{
193 struct hlist_head *head;
194 struct hlist_node *node, *node_next, *module_node, *module_node_next;
195 struct jump_label_entry *e;
196 struct jump_label_module_entry *e_module;
197 struct jump_entry *iter;
198 int i, count;
199 int conflict = 0;
200
201 for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
202 head = &jump_label_table[i];
203 hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
204 hlist_for_each_entry_safe(e_module, module_node,
205 module_node_next,
206 &(e->modules), hlist) {
207 count = e_module->nr_entries;
208 iter = e_module->table;
209 while (count--) {
210 if (addr_conflict(iter, start, end)) {
211 conflict = 1;
212 goto out;
213 }
214 iter++;
215 }
216 }
217 }
218 }
219out:
220 return conflict;
221}
222
223#endif
224
225/***
226 * jump_label_text_reserved - check if addr range is reserved
227 * @start: start text addr
228 * @end: end text addr
229 *
230 * checks if the text addr located between @start and @end
231 * overlaps with any of the jump label patch addresses. Code
232 * that wants to modify kernel text should first verify that
233 * it does not overlap with any of the jump label addresses.
234 *
235 * returns 1 if there is an overlap, 0 otherwise
236 */
237int jump_label_text_reserved(void *start, void *end)
238{
239 struct jump_entry *iter;
240 struct jump_entry *iter_start = __start___jump_table;
241 struct jump_entry *iter_stop = __start___jump_table;
242 int conflict = 0;
243
244 mutex_lock(&jump_label_mutex);
245 iter = iter_start;
246 while (iter < iter_stop) {
247 if (addr_conflict(iter, start, end)) {
248 conflict = 1;
249 goto out;
250 }
251 iter++;
252 }
253
254 /* now check modules */
255#ifdef CONFIG_MODULES
256 conflict = module_conflict(start, end);
257#endif
258out:
259 mutex_unlock(&jump_label_mutex);
260 return conflict;
261}
262
263static __init int init_jump_label(void)
264{
265 int ret;
266 struct jump_entry *iter_start = __start___jump_table;
267 struct jump_entry *iter_stop = __stop___jump_table;
268 struct jump_entry *iter;
269
270 mutex_lock(&jump_label_mutex);
271 ret = build_jump_label_hashtable(__start___jump_table,
272 __stop___jump_table);
273 iter = iter_start;
274 while (iter < iter_stop) {
275 arch_jump_label_text_poke_early(iter->code);
276 iter++;
277 }
278 mutex_unlock(&jump_label_mutex);
279 return ret;
280}
281early_initcall(init_jump_label);
282
283#ifdef CONFIG_MODULES
284
285static struct jump_label_module_entry *
286add_jump_label_module_entry(struct jump_label_entry *entry,
287 struct jump_entry *iter_begin,
288 int count, struct module *mod)
289{
290 struct jump_label_module_entry *e;
291
292 e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
293 if (!e)
294 return ERR_PTR(-ENOMEM);
295 e->mod = mod;
296 e->nr_entries = count;
297 e->table = iter_begin;
298 hlist_add_head(&e->hlist, &entry->modules);
299 return e;
300}
301
302static int add_jump_label_module(struct module *mod)
303{
304 struct jump_entry *iter, *iter_begin;
305 struct jump_label_entry *entry;
306 struct jump_label_module_entry *module_entry;
307 int count;
308
309 /* if the module doesn't have jump label entries, just return */
310 if (!mod->num_jump_entries)
311 return 0;
312
313 sort_jump_label_entries(mod->jump_entries,
314 mod->jump_entries + mod->num_jump_entries);
315 iter = mod->jump_entries;
316 while (iter < mod->jump_entries + mod->num_jump_entries) {
317 entry = get_jump_label_entry(iter->key);
318 iter_begin = iter;
319 count = 0;
320 while ((iter < mod->jump_entries + mod->num_jump_entries) &&
321 (iter->key == iter_begin->key)) {
322 iter++;
323 count++;
324 }
325 if (!entry) {
326 entry = add_jump_label_entry(iter_begin->key, 0, NULL);
327 if (IS_ERR(entry))
328 return PTR_ERR(entry);
329 }
330 module_entry = add_jump_label_module_entry(entry, iter_begin,
331 count, mod);
332 if (IS_ERR(module_entry))
333 return PTR_ERR(module_entry);
334 }
335 return 0;
336}
337
338static void remove_jump_label_module(struct module *mod)
339{
340 struct hlist_head *head;
341 struct hlist_node *node, *node_next, *module_node, *module_node_next;
342 struct jump_label_entry *e;
343 struct jump_label_module_entry *e_module;
344 int i;
345
346 /* if the module doesn't have jump label entries, just return */
347 if (!mod->num_jump_entries)
348 return;
349
350 for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
351 head = &jump_label_table[i];
352 hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
353 hlist_for_each_entry_safe(e_module, module_node,
354 module_node_next,
355 &(e->modules), hlist) {
356 if (e_module->mod == mod) {
357 hlist_del(&e_module->hlist);
358 kfree(e_module);
359 }
360 }
361 if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
362 hlist_del(&e->hlist);
363 kfree(e);
364 }
365 }
366 }
367}
368
369static int
370jump_label_module_notify(struct notifier_block *self, unsigned long val,
371 void *data)
372{
373 struct module *mod = data;
374 int ret = 0;
375
376 switch (val) {
377 case MODULE_STATE_COMING:
378 mutex_lock(&jump_label_mutex);
379 ret = add_jump_label_module(mod);
380 if (ret)
381 remove_jump_label_module(mod);
382 mutex_unlock(&jump_label_mutex);
383 break;
384 case MODULE_STATE_GOING:
385 mutex_lock(&jump_label_mutex);
386 remove_jump_label_module(mod);
387 mutex_unlock(&jump_label_mutex);
388 break;
389 }
390 return ret;
391}
392
393/***
394 * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
395 * @mod: module to patch
396 *
397 * Allow for run-time selection of the optimal nops. Before the module
398 * loads patch these with arch_get_jump_label_nop(), which is specified by
399 * the arch specific jump label code.
400 */
401void jump_label_apply_nops(struct module *mod)
402{
403 struct jump_entry *iter;
404
405 /* if the module doesn't have jump label entries, just return */
406 if (!mod->num_jump_entries)
407 return;
408
409 iter = mod->jump_entries;
410 while (iter < mod->jump_entries + mod->num_jump_entries) {
411 arch_jump_label_text_poke_early(iter->code);
412 iter++;
413 }
414}
415
416struct notifier_block jump_label_module_nb = {
417 .notifier_call = jump_label_module_notify,
418 .priority = 0,
419};
420
421static __init int init_jump_label_module(void)
422{
423 return register_module_notifier(&jump_label_module_nb);
424}
425early_initcall(init_jump_label_module);
426
427#endif /* CONFIG_MODULES */
428
429#endif
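
Tying the module pieces together: module_finalize() calls jump_label_apply_nops() so a freshly loaded module's sites all start as the ideal nop, the MODULE_STATE_COMING notifier above files the module's __jump_table entries under their keys, and the module can then toggle its own sites just like built-in code. A minimal sketch with hypothetical names:

	#include <linux/module.h>
	#include <linux/jump_label.h>

	static int demo_key;			/* hypothetical key owned by this module */

	static int __init demo_init(void)
	{
		enable_jump_label(&demo_key);	/* patches every site keyed on &demo_key */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		disable_jump_label(&demo_key);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");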
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6dd5359e1f0e..ec4210c6501e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,6 +47,7 @@
47#include <linux/memory.h> 47#include <linux/memory.h>
48#include <linux/ftrace.h> 48#include <linux/ftrace.h>
49#include <linux/cpu.h> 49#include <linux/cpu.h>
50#include <linux/jump_label.h>
50 51
51#include <asm-generic/sections.h> 52#include <asm-generic/sections.h>
52#include <asm/cacheflush.h> 53#include <asm/cacheflush.h>
@@ -1146,7 +1147,8 @@ int __kprobes register_kprobe(struct kprobe *p)
1146 preempt_disable(); 1147 preempt_disable();
1147 if (!kernel_text_address((unsigned long) p->addr) || 1148 if (!kernel_text_address((unsigned long) p->addr) ||
1148 in_kprobes_functions((unsigned long) p->addr) || 1149 in_kprobes_functions((unsigned long) p->addr) ||
1149 ftrace_text_reserved(p->addr, p->addr)) { 1150 ftrace_text_reserved(p->addr, p->addr) ||
1151 jump_label_text_reserved(p->addr, p->addr)) {
1150 preempt_enable(); 1152 preempt_enable();
1151 return -EINVAL; 1153 return -EINVAL;
1152 } 1154 }
diff --git a/kernel/module.c b/kernel/module.c
index d0b5f8db11b4..eba134157ef6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -55,6 +55,7 @@
55#include <linux/async.h> 55#include <linux/async.h>
56#include <linux/percpu.h> 56#include <linux/percpu.h>
57#include <linux/kmemleak.h> 57#include <linux/kmemleak.h>
58#include <linux/jump_label.h>
58 59
59#define CREATE_TRACE_POINTS 60#define CREATE_TRACE_POINTS
60#include <trace/events/module.h> 61#include <trace/events/module.h>
@@ -2308,6 +2309,11 @@ static void find_module_sections(struct module *mod, struct load_info *info)
2308 sizeof(*mod->tracepoints), 2309 sizeof(*mod->tracepoints),
2309 &mod->num_tracepoints); 2310 &mod->num_tracepoints);
2310#endif 2311#endif
2312#ifdef HAVE_JUMP_LABEL
2313 mod->jump_entries = section_objs(info, "__jump_table",
2314 sizeof(*mod->jump_entries),
2315 &mod->num_jump_entries);
2316#endif
2311#ifdef CONFIG_EVENT_TRACING 2317#ifdef CONFIG_EVENT_TRACING
2312 mod->trace_events = section_objs(info, "_ftrace_events", 2318 mod->trace_events = section_objs(info, "_ftrace_events",
2313 sizeof(*mod->trace_events), 2319 sizeof(*mod->trace_events),
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index a7cc3793baf6..209b379a4721 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -263,6 +263,11 @@ int __init trace_workqueue_early_init(void)
263{ 263{
264 int ret, cpu; 264 int ret, cpu;
265 265
266 for_each_possible_cpu(cpu) {
267 spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
268 INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
269 }
270
266 ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL); 271 ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
267 if (ret) 272 if (ret)
268 goto out; 273 goto out;
@@ -279,11 +284,6 @@ int __init trace_workqueue_early_init(void)
279 if (ret) 284 if (ret)
280 goto no_creation; 285 goto no_creation;
281 286
282 for_each_possible_cpu(cpu) {
283 spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
284 INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
285 }
286
287 return 0; 287 return 0;
288 288
289no_creation: 289no_creation:
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index c77f3eceea25..d6073a50a6ca 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -25,6 +25,7 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/jump_label.h>
28 29
29extern struct tracepoint __start___tracepoints[]; 30extern struct tracepoint __start___tracepoints[];
30extern struct tracepoint __stop___tracepoints[]; 31extern struct tracepoint __stop___tracepoints[];
@@ -263,7 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry,
263 * is used. 264 * is used.
264 */ 265 */
265 rcu_assign_pointer(elem->funcs, (*entry)->funcs); 266 rcu_assign_pointer(elem->funcs, (*entry)->funcs);
266 elem->state = active; 267 if (!elem->state && active) {
268 enable_jump_label(&elem->state);
269 elem->state = active;
270 } else if (elem->state && !active) {
271 disable_jump_label(&elem->state);
272 elem->state = active;
273 }
267} 274}
268 275
269/* 276/*
@@ -277,7 +284,10 @@ static void disable_tracepoint(struct tracepoint *elem)
277 if (elem->unregfunc && elem->state) 284 if (elem->unregfunc && elem->state)
278 elem->unregfunc(); 285 elem->unregfunc();
279 286
280 elem->state = 0; 287 if (elem->state) {
288 disable_jump_label(&elem->state);
289 elem->state = 0;
290 }
281 rcu_assign_pointer(elem->funcs, NULL); 291 rcu_assign_pointer(elem->funcs, NULL);
282} 292}
283 293
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc2533728..e925c7b960f1 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
26#include <linux/dynamic_debug.h> 26#include <linux/dynamic_debug.h>
27#include <linux/debugfs.h> 27#include <linux/debugfs.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/jump_label.h>
29 30
30extern struct _ddebug __start___verbose[]; 31extern struct _ddebug __start___verbose[];
31extern struct _ddebug __stop___verbose[]; 32extern struct _ddebug __stop___verbose[];
32 33
33/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
34 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
35 * use independent hash functions, to reduce the chance of false positives.
36 */
37long long dynamic_debug_enabled;
38EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
39long long dynamic_debug_enabled2;
40EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
41
42struct ddebug_table { 34struct ddebug_table {
43 struct list_head link; 35 struct list_head link;
44 char *mod_name; 36 char *mod_name;
@@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
88} 80}
89 81
90/* 82/*
91 * must be called with ddebug_lock held
92 */
93
94static int disabled_hash(char hash, bool first_table)
95{
96 struct ddebug_table *dt;
97 char table_hash_value;
98
99 list_for_each_entry(dt, &ddebug_tables, link) {
100 if (first_table)
101 table_hash_value = dt->ddebugs->primary_hash;
102 else
103 table_hash_value = dt->ddebugs->secondary_hash;
104 if (dt->num_enabled && (hash == table_hash_value))
105 return 0;
106 }
107 return 1;
108}
109
110/*
111 * Search the tables for _ddebug's which match the given 83 * Search the tables for _ddebug's which match the given
112 * `query' and apply the `flags' and `mask' to them. Tells 84 * `query' and apply the `flags' and `mask' to them. Tells
113 * the user which ddebug's were changed, or whether none 85 * the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
170 dt->num_enabled++; 142 dt->num_enabled++;
171 dp->flags = newflags; 143 dp->flags = newflags;
172 if (newflags) { 144 if (newflags) {
173 dynamic_debug_enabled |= 145 enable_jump_label(&dp->enabled);
174 (1LL << dp->primary_hash);
175 dynamic_debug_enabled2 |=
176 (1LL << dp->secondary_hash);
177 } else { 146 } else {
178 if (disabled_hash(dp->primary_hash, true)) 147 disable_jump_label(&dp->enabled);
179 dynamic_debug_enabled &=
180 ~(1LL << dp->primary_hash);
181 if (disabled_hash(dp->secondary_hash, false))
182 dynamic_debug_enabled2 &=
183 ~(1LL << dp->secondary_hash);
184 } 148 }
185 if (verbose) 149 if (verbose)
186 printk(KERN_INFO 150 printk(KERN_INFO
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 54fd1b700131..7bfcf1a09ac5 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -101,14 +101,6 @@ basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
101modname_flags = $(if $(filter 1,$(words $(modname))),\ 101modname_flags = $(if $(filter 1,$(words $(modname))),\
102 -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))") 102 -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
103 103
104#hash values
105ifdef CONFIG_DYNAMIC_DEBUG
106debug_flags = -D"DEBUG_HASH=$(shell ./scripts/basic/hash djb2 $(@D)$(modname))"\
107 -D"DEBUG_HASH2=$(shell ./scripts/basic/hash r5 $(@D)$(modname))"
108else
109debug_flags =
110endif
111
112orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \ 104orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
113 $(ccflags-y) $(CFLAGS_$(basetarget).o) 105 $(ccflags-y) $(CFLAGS_$(basetarget).o)
114_c_flags = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags)) 106_c_flags = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
@@ -152,8 +144,7 @@ endif
152 144
153c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ 145c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
154 $(__c_flags) $(modkern_cflags) \ 146 $(__c_flags) $(modkern_cflags) \
155 -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) \ 147 -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
156 $(debug_flags)
157 148
158a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ 149a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
159 $(__a_flags) $(modkern_aflags) 150 $(__a_flags) $(modkern_aflags)
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index 09559951df12..4c324a1f1e0e 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -9,7 +9,7 @@
9# fixdep: Used to generate dependency information during build process 9# fixdep: Used to generate dependency information during build process
10# docproc: Used in Documentation/DocBook 10# docproc: Used in Documentation/DocBook
11 11
12hostprogs-y := fixdep docproc hash 12hostprogs-y := fixdep docproc
13always := $(hostprogs-y) 13always := $(hostprogs-y)
14 14
15# fixdep is needed to compile other host programs 15# fixdep is needed to compile other host programs
diff --git a/scripts/basic/hash.c b/scripts/basic/hash.c
deleted file mode 100644
index 2ef5d3f666b8..000000000000
--- a/scripts/basic/hash.c
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
3 *
4 */
5
6#include <stdio.h>
7#include <stdlib.h>
8#include <string.h>
9
10#define DYNAMIC_DEBUG_HASH_BITS 6
11
12static const char *program;
13
14static void usage(void)
15{
16 printf("Usage: %s <djb2|r5> <modname>\n", program);
17 exit(1);
18}
19
20/* djb2 hashing algorithm by Dan Bernstein. From:
21 * http://www.cse.yorku.ca/~oz/hash.html
22 */
23
24static unsigned int djb2_hash(char *str)
25{
26 unsigned long hash = 5381;
27 int c;
28
29 c = *str;
30 while (c) {
31 hash = ((hash << 5) + hash) + c;
32 c = *++str;
33 }
34 return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
35}
36
37static unsigned int r5_hash(char *str)
38{
39 unsigned long hash = 0;
40 int c;
41
42 c = *str;
43 while (c) {
44 hash = (hash + (c << 4) + (c >> 4)) * 11;
45 c = *++str;
46 }
47 return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
48}
49
50int main(int argc, char *argv[])
51{
52 program = argv[0];
53
54 if (argc != 3)
55 usage();
56 if (!strcmp(argv[1], "djb2"))
57 printf("%d\n", djb2_hash(argv[2]));
58 else if (!strcmp(argv[1], "r5"))
59 printf("%d\n", r5_hash(argv[2]));
60 else
61 usage();
62 exit(0);
63}
64
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
new file mode 100644
index 000000000000..8e82424be7aa
--- /dev/null
+++ b/scripts/gcc-goto.sh
@@ -0,0 +1,5 @@
1#!/bin/sh
2# Test for gcc 'asm goto' suport
3# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
4
5echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $1 -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"