-rw-r--r--  arch/Kconfig                         | 14
-rw-r--r--  arch/sparc/include/asm/jump_label.h  |  1
-rw-r--r--  arch/x86/Makefile_32.cpu             | 13
-rw-r--r--  arch/x86/kernel/alternative.c        | 69
-rw-r--r--  arch/x86/kernel/apic/io_apic.c       |  2
-rw-r--r--  drivers/oprofile/buffer_sync.c       |  2
-rw-r--r--  drivers/oprofile/cpu_buffer.c        | 10
-rw-r--r--  drivers/oprofile/cpu_buffer.h        |  1
-rw-r--r--  drivers/oprofile/timer_int.c         | 13
-rw-r--r--  include/linux/jump_label.h           |  7
-rw-r--r--  kernel/jump_label.c                  | 77
-rw-r--r--  kernel/kprobes.c                     | 26
12 files changed, 153 insertions(+), 82 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 53d7f619a1b9..8bf0fa652eb6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -42,6 +42,20 @@ config KPROBES
 	  for kernel debugging, non-intrusive instrumentation and testing.
 	  If in doubt, say "N".
 
+config JUMP_LABEL
+	bool "Optimize trace point call sites"
+	depends on HAVE_ARCH_JUMP_LABEL
+	help
+	  If it is detected that the compiler has support for "asm goto",
+	  the kernel will compile trace point locations with just a
+	  nop instruction. When trace points are enabled, the nop will
+	  be converted to a jump to the trace function. This technique
+	  lowers overhead and stress on the branch prediction of the
+	  processor.
+
+	  On i386, options added to the compiler flags may increase
+	  the size of the kernel slightly.
+
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
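
The help text above describes the mechanism: each trace point site compiles down to a single 5-byte nop, and enabling the trace point patches that nop into a jump. Below is a minimal userspace sketch of the underlying gcc "asm goto" pattern, modeled loosely on the x86 and sparc macros touched by this series. It assumes x86-64, gcc 4.5 or later (for asm goto), and a non-PIE build (e.g. gcc -O2 -no-pie -fno-pie); the names STATIC_BRANCH, tracing_key and do_trace are illustrative, not the kernel's API, and no runtime patching happens here.

#include <stdio.h>

static int tracing_key;	/* stands in for the jump label key */

/* Emit a 5-byte nop at the branch site and record (site, target, key)
 * in a __jump_table-style section, in the spirit of the kernel's
 * JUMP_LABEL() macro.  "aw" is used here to keep the userspace link
 * simple; the kernel marks the section "a". */
#define STATIC_BRANCH(key, label)					\
	asm goto("1:\n\t"						\
		 ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n\t"		\
		 ".pushsection __jump_table, \"aw\"\n\t"		\
		 ".quad 1b, %l[" #label "], %c0\n\t"			\
		 ".popsection\n\t"					\
		 : : "i" (key) : : label)

int main(void)
{
	STATIC_BRANCH(&tracing_key, do_trace);
	/* Disabled case: the nop falls through and only this line runs.
	 * The kernel would patch the nop into "jmp do_trace" to enable it. */
	puts("trace point disabled: fast path");
	return 0;

do_trace:
	puts("trace point enabled: slow path");
	return 0;
}
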
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 65c0d3029796..427d4684e0d2 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -13,6 +13,7 @@
 		"nop\n\t" \
 		"nop\n\t" \
 		".pushsection __jump_table, \"a\"\n\t"\
+		".align 4\n\t" \
 		".word 1b, %l[" #label "], %c0\n\t" \
 		".popsection \n\t" \
 		: : "i" (key) : : label);\
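
The added ".align 4" keeps each __jump_table record naturally aligned: the three .word directives that follow it emit the fields of one jump table entry, which the sorting and patching code later reads back as a struct. A rough view of that record on 32-bit sparc, inferred from the .word entries above (an illustration, not the kernel header itself):

#include <stdint.h>

typedef uint32_t jump_label_t;	/* .word is 32 bits here */

struct jump_entry {
	jump_label_t code;	/* 1b:   address of the nop to patch  */
	jump_label_t target;	/* %l[]: destination of the jump      */
	jump_label_t key;	/* %c0:  key used to look the site up */
};

Without the alignment directive, a stray odd-sized item earlier in the section could leave these words misaligned, which matters to code that walks the table as an array of such structs.
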
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 1255d953c65d..f2ee1abb1df9 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -51,7 +51,18 @@ cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
 # prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
 # tracer assumptions. For i686, generic, core2 this is set by the
 # compiler anyway
-cflags-$(CONFIG_FUNCTION_GRAPH_TRACER) += $(call cc-option,-maccumulate-outgoing-args)
+ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
+ADD_ACCUMULATE_OUTGOING_ARGS := y
+endif
+
+# Work around a bug in the first gcc implementations of asm goto,
+# which can cause gcc to mess up the push and pop of the stack in
+# some uses of asm goto.
+ifeq ($(CONFIG_JUMP_LABEL), y)
+ADD_ACCUMULATE_OUTGOING_ARGS := y
+endif
+
+cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
 
 # Bug fix for binutils: this option is required in order to keep
 # binutils from generating NOPL instructions against our will.
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5ceeca382820..5079f24c955a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -644,65 +644,26 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
 
-unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+#ifdef CONFIG_X86_64
+unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+#else
+unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+#endif
 
 void __init arch_init_ideal_nop5(void)
 {
-	extern const unsigned char ftrace_test_p6nop[];
-	extern const unsigned char ftrace_test_nop5[];
-	extern const unsigned char ftrace_test_jmp[];
-	int faulted = 0;
-
 	/*
-	 * There is no good nop for all x86 archs.
-	 * We will default to using the P6_NOP5, but first we
-	 * will test to make sure that the nop will actually
-	 * work on this CPU. If it faults, we will then
-	 * go to a lesser efficient 5 byte nop. If that fails
-	 * we then just use a jmp as our nop. This isn't the most
-	 * efficient nop, but we can not use a multi part nop
-	 * since we would then risk being preempted in the middle
-	 * of that nop, and if we enabled tracing then, it might
-	 * cause a system crash.
+	 * There is no good nop for all x86 archs. This selection
+	 * algorithm should be unified with the one in find_nop_table(),
+	 * but this should be good enough for now.
 	 *
-	 * TODO: check the cpuid to determine the best nop.
+	 * For cases other than the ones below, use the safe (as in
+	 * always functional) defaults above.
 	 */
-	asm volatile (
-		"ftrace_test_jmp:"
-		"jmp ftrace_test_p6nop\n"
-		"nop\n"
-		"nop\n"
-		"nop\n"  /* 2 byte jmp + 3 bytes */
-		"ftrace_test_p6nop:"
-		P6_NOP5
-		"jmp 1f\n"
-		"ftrace_test_nop5:"
-		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"1:"
-		".section .fixup, \"ax\"\n"
-		"2:	movl $1, %0\n"
-		"	jmp ftrace_test_nop5\n"
-		"3:	movl $2, %0\n"
-		"	jmp 1b\n"
-		".previous\n"
-		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
-		_ASM_EXTABLE(ftrace_test_nop5, 3b)
-		: "=r"(faulted) : "0" (faulted));
-
-	switch (faulted) {
-	case 0:
-		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
-		break;
-	case 1:
-		pr_info("converting mcount calls to 66 66 66 66 90\n");
-		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
-		break;
-	case 2:
-		pr_info("converting mcount calls to jmp . + 5\n");
-		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
-		break;
-	}
-
+#ifdef CONFIG_X86_64
+	/* Don't use these on 32 bits due to broken virtualizers */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		memcpy(ideal_nop5, p6_nops[5], 5);
+#endif
 }
 #endif
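
Instead of probing at boot which 5-byte nop the CPU accepts (and falling back to a jmp on a fault), the patch hard-codes safe per-arch defaults and upgrades to the P6 nopl only on 64-bit Intel, where broken 32-bit virtualizers are not a concern. For reference, the byte sequences involved decode as follows; this is a reading aid, not kernel code:

/* 5-byte single-instruction nops referenced in this hunk.  A single
 * instruction matters: a task preempted between the parts of a
 * multi-instruction nop could resume into garbage once the site is
 * patched. */
static const unsigned char nop5_default_64[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
	/* four operand-size prefixes + nop; the new 64-bit default */
static const unsigned char nop5_default_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
	/* ds-prefixed lea 0x0(%esi,%eiz,1),%esi; the new 32-bit default */
static const unsigned char nop5_p6[5]         = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	/* nopl 0x0(%eax,%eax,1); copied from p6_nops[5] on 64-bit Intel */
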
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 0929191d83cf..7cc0a721f628 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3109,7 +3109,7 @@ void destroy_irq(unsigned int irq)
 
 	irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
 
-	if (intr_remapping_enabled)
+	if (irq_remapped(cfg))
 		free_irte(irq);
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	__clear_irq_vector(irq, cfg);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index b7e755f4178a..a3984f4ef192 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -190,7 +190,7 @@ void sync_stop(void)
 	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
 	task_handoff_unregister(&task_free_nb);
 	mutex_unlock(&buffer_mutex);
-	flush_scheduled_work();
+	flush_cpu_work();
 
 	/* make sure we don't leak task structs */
 	process_task_mortuary();
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index f179ac2ea801..59f55441e075 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -111,14 +111,18 @@ void start_cpu_work(void)
 
 void end_cpu_work(void)
 {
-	int i;
-
 	work_enabled = 0;
+}
+
+void flush_cpu_work(void)
+{
+	int i;
 
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
-		cancel_delayed_work(&b->work);
+		/* these works are per-cpu, no need for flush_sync */
+		flush_delayed_work(&b->work);
 	}
 }
 
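
end_cpu_work() now only stops new work from being re-armed (work_enabled = 0); waiting for the per-cpu delayed works moves into the new flush_cpu_work(), which the buffer_sync.c hunk above calls from sync_stop() in place of the global flush_scheduled_work(). A condensed sketch of the intended ordering; where exactly end_cpu_work() is invoked lies outside the hunks shown here, so the surrounding calls are only indicated in comments:

void end_cpu_work(void);	/* declared in drivers/oprofile/cpu_buffer.h */
void flush_cpu_work(void);	/* new declaration added by this series */

static void oprofile_stop_sketch(void)
{
	end_cpu_work();		/* first: work_enabled = 0, nothing re-queues itself */

	/* ... unregister notifiers, drop buffer_mutex ... */

	flush_cpu_work();	/* then: wait for each CPU's delayed work to finish,
				 * rather than flushing the whole global workqueue */
}
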
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 68ea16ab645f..e1d097e250ae 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -25,6 +25,7 @@ void free_cpu_buffers(void);
 
 void start_cpu_work(void);
 void end_cpu_work(void);
+void flush_cpu_work(void);
 
 /* CPU buffer is composed of such entries (which are
  * also used for context switch notes)
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index dc0ae4d14dff..010725117dbb 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -21,6 +21,7 @@
 #include "oprof.h"
 
 static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+static int ctr_running;
 
 static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
 {
@@ -33,6 +34,9 @@ static void __oprofile_hrtimer_start(void *unused)
 {
 	struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
 
+	if (!ctr_running)
+		return;
+
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = oprofile_hrtimer_notify;
 
@@ -42,7 +46,10 @@ static void __oprofile_hrtimer_start(void *unused)
 
 static int oprofile_hrtimer_start(void)
 {
+	get_online_cpus();
+	ctr_running = 1;
 	on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
+	put_online_cpus();
 	return 0;
 }
 
@@ -50,6 +57,9 @@ static void __oprofile_hrtimer_stop(int cpu)
 {
 	struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
 
+	if (!ctr_running)
+		return;
+
 	hrtimer_cancel(hrtimer);
 }
 
@@ -57,8 +67,11 @@ static void oprofile_hrtimer_stop(void)
 {
 	int cpu;
 
+	get_online_cpus();
 	for_each_online_cpu(cpu)
 		__oprofile_hrtimer_stop(cpu);
+	ctr_running = 0;
+	put_online_cpus();
 }
 
 static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
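
The new ctr_running flag, together with get_online_cpus()/put_online_cpus(), closes a race between the profiler's start/stop paths and CPU hotplug: the hotplug notifier (oprofile_cpu_notify, visible as context above) starts or stops the per-cpu hrtimer for a CPU that comes or goes, and the early returns added to __oprofile_hrtimer_start()/_stop() make those callbacks no-ops while profiling is not actually running. A compressed sketch of the guarded pattern; the plain function declarations below stand in for the kernel primitives and the commented calls for the code shown in the hunks:

void get_online_cpus(void);
void put_online_cpus(void);

static int ctr_running;		/* only changed with CPU hotplug excluded */

static void hrtimer_start_all_sketch(void)
{
	get_online_cpus();	/* no CPU can appear or vanish in here */
	ctr_running = 1;	/* from now on, hotplug callbacks arm timers too */
	/* on_each_cpu(__oprofile_hrtimer_start, NULL, 1); */
	put_online_cpus();
}

static void hrtimer_stop_all_sketch(void)
{
	get_online_cpus();
	/* for_each_online_cpu(cpu) __oprofile_hrtimer_stop(cpu); */
	ctr_running = 0;	/* a late hotplug callback finds the flag clear and bails */
	put_online_cpus();
}
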
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b67cb180e6e9..7880f18e4b86 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,7 +1,7 @@
 #ifndef _LINUX_JUMP_LABEL_H
 #define _LINUX_JUMP_LABEL_H
 
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 # include <asm/jump_label.h>
 # define HAVE_JUMP_LABEL
 #endif
@@ -18,6 +18,8 @@ struct module;
 extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
+extern void jump_label_lock(void);
+extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
 				      enum jump_label_type type);
 extern void arch_jump_label_text_poke_early(jump_label_t addr);
@@ -59,6 +61,9 @@ static inline int jump_label_text_reserved(void *start, void *end)
 	return 0;
 }
 
+static inline void jump_label_lock(void) {}
+static inline void jump_label_unlock(void) {}
+
 #endif
 
 #define COND_STMT(key, stmt) \
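
jump_label_lock()/jump_label_unlock() become part of the public interface, with empty static inline stubs when jump labels are compiled out, so callers never need #ifdef HAVE_JUMP_LABEL around the locking. A minimal, self-contained sketch of the pattern and of how a caller such as register_kprobe() (see the kernel/kprobes.c hunks below) can use it; the caller body here is illustrative:

#ifdef HAVE_JUMP_LABEL
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern int jump_label_text_reserved(void *start, void *end);
#else
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
static inline int jump_label_text_reserved(void *start, void *end) { return 0; }
#endif

/* Callers can bracket the reservation check unconditionally: */
static inline int text_is_patchable_sketch(void *addr)
{
	int reserved;

	jump_label_lock();	/* jump_label_text_reserved() now expects this held */
	reserved = jump_label_text_reserved(addr, addr);
	jump_label_unlock();
	return !reserved;
}
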
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 7be868bf25c6..3b79bd938330 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -39,6 +39,16 @@ struct jump_label_module_entry {
 	struct module *mod;
 };
 
+void jump_label_lock(void)
+{
+	mutex_lock(&jump_label_mutex);
+}
+
+void jump_label_unlock(void)
+{
+	mutex_unlock(&jump_label_mutex);
+}
+
 static int jump_label_cmp(const void *a, const void *b)
 {
 	const struct jump_entry *jea = a;
@@ -152,7 +162,7 @@ void jump_label_update(unsigned long key, enum jump_label_type type)
 	struct jump_label_module_entry *e_module;
 	int count;
 
-	mutex_lock(&jump_label_mutex);
+	jump_label_lock();
 	entry = get_jump_label_entry((jump_label_t)key);
 	if (entry) {
 		count = entry->nr_entries;
@@ -168,13 +178,14 @@ void jump_label_update(unsigned long key, enum jump_label_type type)
 			count = e_module->nr_entries;
 			iter = e_module->table;
 			while (count--) {
-				if (kernel_text_address(iter->code))
+				if (iter->key &&
+				    kernel_text_address(iter->code))
 					arch_jump_label_transform(iter, type);
 				iter++;
 			}
 		}
 	}
-	mutex_unlock(&jump_label_mutex);
+	jump_label_unlock();
 }
 
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
@@ -231,6 +242,7 @@ out:
  * overlaps with any of the jump label patch addresses. Code
  * that wants to modify kernel text should first verify that
  * it does not overlap with any of the jump label addresses.
+ * Caller must hold jump_label_mutex.
  *
  * returns 1 if there is an overlap, 0 otherwise
  */
@@ -241,7 +253,6 @@ int jump_label_text_reserved(void *start, void *end)
 	struct jump_entry *iter_stop = __start___jump_table;
 	int conflict = 0;
 
-	mutex_lock(&jump_label_mutex);
 	iter = iter_start;
 	while (iter < iter_stop) {
 		if (addr_conflict(iter, start, end)) {
@@ -256,10 +267,16 @@ int jump_label_text_reserved(void *start, void *end)
 	conflict = module_conflict(start, end);
 #endif
 out:
-	mutex_unlock(&jump_label_mutex);
 	return conflict;
 }
 
+/*
+ * Not all archs need this.
+ */
+void __weak arch_jump_label_text_poke_early(jump_label_t addr)
+{
+}
+
 static __init int init_jump_label(void)
 {
 	int ret;
@@ -267,7 +284,7 @@ static __init int init_jump_label(void)
 	struct jump_entry *iter_stop = __stop___jump_table;
 	struct jump_entry *iter;
 
-	mutex_lock(&jump_label_mutex);
+	jump_label_lock();
 	ret = build_jump_label_hashtable(__start___jump_table,
 					 __stop___jump_table);
 	iter = iter_start;
@@ -275,7 +292,7 @@ static __init int init_jump_label(void)
 		arch_jump_label_text_poke_early(iter->code);
 		iter++;
 	}
-	mutex_unlock(&jump_label_mutex);
+	jump_label_unlock();
 	return ret;
 }
 early_initcall(init_jump_label);
@@ -366,6 +383,39 @@ static void remove_jump_label_module(struct module *mod)
 	}
 }
 
+static void remove_jump_label_module_init(struct module *mod)
+{
+	struct hlist_head *head;
+	struct hlist_node *node, *node_next, *module_node, *module_node_next;
+	struct jump_label_entry *e;
+	struct jump_label_module_entry *e_module;
+	struct jump_entry *iter;
+	int i, count;
+
+	/* if the module doesn't have jump label entries, just return */
+	if (!mod->num_jump_entries)
+		return;
+
+	for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+		head = &jump_label_table[i];
+		hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+			hlist_for_each_entry_safe(e_module, module_node,
+						  module_node_next,
+						  &(e->modules), hlist) {
+				if (e_module->mod != mod)
+					continue;
+				count = e_module->nr_entries;
+				iter = e_module->table;
+				while (count--) {
+					if (within_module_init(iter->code, mod))
+						iter->key = 0;
+					iter++;
+				}
+			}
+		}
+	}
+}
+
 static int
 jump_label_module_notify(struct notifier_block *self, unsigned long val,
 			 void *data)
@@ -375,16 +425,21 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 
 	switch (val) {
 	case MODULE_STATE_COMING:
-		mutex_lock(&jump_label_mutex);
+		jump_label_lock();
 		ret = add_jump_label_module(mod);
 		if (ret)
 			remove_jump_label_module(mod);
-		mutex_unlock(&jump_label_mutex);
+		jump_label_unlock();
 		break;
 	case MODULE_STATE_GOING:
-		mutex_lock(&jump_label_mutex);
+		jump_label_lock();
 		remove_jump_label_module(mod);
-		mutex_unlock(&jump_label_mutex);
+		jump_label_unlock();
+		break;
+	case MODULE_STATE_LIVE:
+		jump_label_lock();
+		remove_jump_label_module_init(mod);
+		jump_label_unlock();
 		break;
 	}
 	return ret;
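
The new MODULE_STATE_LIVE case runs once a module has finished init and its .init.text is about to be freed: remove_jump_label_module_init() walks the module's jump entries and zeroes iter->key for every site that lived in init text. Combined with the extra "iter->key &&" test added to jump_label_update() earlier in this file, later enable/disable operations never patch addresses that no longer exist. A simplified model of the skip; the types and the text_ok helper are illustrative:

struct jump_entry_sketch {
	unsigned long code;	/* patch site address             */
	unsigned long target;	/* jump destination               */
	unsigned long key;	/* 0 == retired (init text freed) */
};

static void update_module_entries_sketch(struct jump_entry_sketch *tab, int n,
					 int (*text_ok)(unsigned long))
{
	int i;

	for (i = 0; i < n; i++) {
		/* mirrors: if (iter->key && kernel_text_address(iter->code)) */
		if (!tab[i].key || !text_ok(tab[i].code))
			continue;	/* retired or unmapped: never patch it */
		/* arch_jump_label_transform(&tab[i], type) would run here */
	}
}
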
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 99865c33a60d..9737a76e106f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1145,14 +1145,13 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if (ret)
 		return ret;
 
+	jump_label_lock();
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
 	    ftrace_text_reserved(p->addr, p->addr) ||
-	    jump_label_text_reserved(p->addr, p->addr)) {
-		preempt_enable();
-		return -EINVAL;
-	}
+	    jump_label_text_reserved(p->addr, p->addr))
+		goto fail_with_jump_label;
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
@@ -1166,10 +1165,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 	 * We must hold a refcount of the probed module while updating
 	 * its code to prohibit unexpected unloading.
 	 */
-	if (unlikely(!try_module_get(probed_mod))) {
-		preempt_enable();
-		return -EINVAL;
-	}
+	if (unlikely(!try_module_get(probed_mod)))
+		goto fail_with_jump_label;
+
 	/*
 	 * If the module freed .init.text, we couldn't insert
 	 * kprobes in there.
@@ -1177,16 +1175,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 		if (within_module_init((unsigned long)p->addr, probed_mod) &&
 		    probed_mod->state != MODULE_STATE_COMING) {
 			module_put(probed_mod);
-			preempt_enable();
-			return -EINVAL;
+			goto fail_with_jump_label;
 		}
 	}
 	preempt_enable();
+	jump_label_unlock();
 
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
 	mutex_lock(&kprobe_mutex);
 
+	jump_label_lock(); /* needed to call jump_label_text_reserved() */
+
 	get_online_cpus();	/* For avoiding text_mutex deadlock. */
 	mutex_lock(&text_mutex);
 
@@ -1214,12 +1214,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 out:
 	mutex_unlock(&text_mutex);
 	put_online_cpus();
+	jump_label_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
 		module_put(probed_mod);
 
 	return ret;
+
+fail_with_jump_label:
+	preempt_enable();
+	jump_label_unlock();
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
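
Taken together, the kprobes hunks give register_kprobe() a single locking discipline around jump labels: jump_label_lock() is held across the address-validity checks (matching the "Caller must hold jump_label_mutex" rule added to jump_label_text_reserved() above), taken again for the arming phase inside kprobe_mutex, and the new fail_with_jump_label exit drops both preemption and the lock on every early error path. A condensed outline of the resulting order, as a summary sketch rather than the literal function; the plain declarations stand in for kernel macros and functions:

void jump_label_lock(void);
void jump_label_unlock(void);
void preempt_disable(void);
void preempt_enable(void);

static int register_kprobe_order_sketch(void)
{
	/* phase 1: validate the probe address */
	jump_label_lock();
	preempt_disable();
	if (0 /* kernel_text_address(), in_kprobes_functions(),
	       * ftrace_text_reserved() or jump_label_text_reserved() fails */)
		goto fail_with_jump_label;
	preempt_enable();
	jump_label_unlock();

	/* phase 2: insert and arm, nested inside kprobe_mutex */
	/* mutex_lock(&kprobe_mutex); */
	jump_label_lock();	/* needed to call jump_label_text_reserved() */
	/* get_online_cpus();	   avoids a deadlock against text_mutex
	 * mutex_lock(&text_mutex);
	 *	... add to the hash table and arm the probe ...
	 * mutex_unlock(&text_mutex);
	 * put_online_cpus(); */
	jump_label_unlock();
	/* mutex_unlock(&kprobe_mutex); */
	return 0;

fail_with_jump_label:
	preempt_enable();
	jump_label_unlock();
	return -22;	/* -EINVAL in the real function */
}
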