author      Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer   Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit      ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree        644b88f8a71896307d71438e9b3af49126ffb22b /kernel/kprobes.c
parent      43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent      3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)

Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--    kernel/kprobes.c    754
1 file changed, 614 insertions(+), 140 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5240d75f4c60..0ed46f3e51e9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -42,8 +42,11 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -90,6 +93,10 @@ static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  */
 static struct kprobe_blackpoint kprobe_blacklist[] = {
     {"preempt_schedule",},
+    {"native_get_debugreg",},
+    {"irq_entries_start",},
+    {"common_interrupt",},
+    {"mcount",},    /* mcount can be called from everywhere */
     {NULL}    /* Terminator */
 };
 
@@ -100,81 +107,74 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
  * stepping on the instruction on a vmalloced/kmalloced/data page
  * is a recipe for disaster
  */
-#define INSNS_PER_PAGE    (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
-
 struct kprobe_insn_page {
     struct list_head list;
     kprobe_opcode_t *insns;        /* Page of instruction slots */
-    char slot_used[INSNS_PER_PAGE];
     int nused;
     int ngarbage;
+    char slot_used[];
+};
+
+#define KPROBE_INSN_PAGE_SIZE(slots)            \
+    (offsetof(struct kprobe_insn_page, slot_used) +    \
+     (sizeof(char) * (slots)))
+
+struct kprobe_insn_cache {
+    struct list_head pages;    /* list of kprobe_insn_page */
+    size_t insn_size;    /* size of instruction slot */
+    int nr_garbage;
 };
 
+static int slots_per_page(struct kprobe_insn_cache *c)
+{
+    return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
+}
+
 enum kprobe_slot_state {
     SLOT_CLEAN = 0,
     SLOT_DIRTY = 1,
     SLOT_USED = 2,
 };
 
-static DEFINE_MUTEX(kprobe_insn_mutex);    /* Protects kprobe_insn_pages */
-static LIST_HEAD(kprobe_insn_pages);
-static int kprobe_garbage_slots;
-static int collect_garbage_slots(void);
-
-static int __kprobes check_safety(void)
-{
-    int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
-    ret = freeze_processes();
-    if (ret == 0) {
-        struct task_struct *p, *q;
-        do_each_thread(p, q) {
-            if (p != current && p->state == TASK_RUNNING &&
-                p->pid != 0) {
-                printk("Check failed: %s is running\n", p->comm);
-                ret = -1;
-                goto loop_end;
-            }
-        } while_each_thread(p, q);
-    }
-loop_end:
-    thaw_processes();
-#else
-    synchronize_sched();
-#endif
-    return ret;
-}
+static DEFINE_MUTEX(kprobe_insn_mutex);    /* Protects kprobe_insn_slots */
+static struct kprobe_insn_cache kprobe_insn_slots = {
+    .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
+    .insn_size = MAX_INSN_SIZE,
+    .nr_garbage = 0,
+};
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
 
 /**
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(void)
+static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
     struct kprobe_insn_page *kip;
 
  retry:
-    list_for_each_entry(kip, &kprobe_insn_pages, list) {
-        if (kip->nused < INSNS_PER_PAGE) {
+    list_for_each_entry(kip, &c->pages, list) {
+        if (kip->nused < slots_per_page(c)) {
             int i;
-            for (i = 0; i < INSNS_PER_PAGE; i++) {
+            for (i = 0; i < slots_per_page(c); i++) {
                 if (kip->slot_used[i] == SLOT_CLEAN) {
                     kip->slot_used[i] = SLOT_USED;
                     kip->nused++;
-                    return kip->insns + (i * MAX_INSN_SIZE);
+                    return kip->insns + (i * c->insn_size);
                 }
             }
-            /* Surprise!  No unused slots.  Fix kip->nused. */
-            kip->nused = INSNS_PER_PAGE;
+            /* kip->nused is broken. Fix it. */
+            kip->nused = slots_per_page(c);
+            WARN_ON(1);
         }
     }
 
     /* If there are any garbage slots, collect it and try again. */
-    if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+    if (c->nr_garbage && collect_garbage_slots(c) == 0)
         goto retry;
-    }
-    /* All out of space.  Need to allocate a new page. Use slot 0. */
-    kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+
+    /* All out of space.  Need to allocate a new page. */
+    kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
     if (!kip)
         return NULL;
 
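The hunk above turns the fixed `char slot_used[INSNS_PER_PAGE]` into a C99 flexible array member, so each insn cache can size the bitmap to its own slot count at allocation time; `KPROBE_INSN_PAGE_SIZE()` uses offsetof() rather than sizeof() so that struct padding before the array is not double-counted. A minimal userspace sketch of the same idiom (names are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    struct page_rec {
        int nused;
        char slot_used[];    /* flexible array member: sized at alloc time */
    };

    /* offsetof() gives the exact byte where the flexible array begins,
     * exactly as KPROBE_INSN_PAGE_SIZE() does above. */
    #define PAGE_REC_SIZE(slots) \
        (offsetof(struct page_rec, slot_used) + sizeof(char) * (slots))

    static struct page_rec *page_rec_alloc(int slots)
    {
        struct page_rec *p = malloc(PAGE_REC_SIZE(slots));
        if (p) {
            p->nused = 0;
            memset(p->slot_used, 0, slots);    /* all slots clean */
        }
        return p;
    }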
@@ -189,20 +189,23 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
         return NULL;
     }
     INIT_LIST_HEAD(&kip->list);
-    list_add(&kip->list, &kprobe_insn_pages);
-    memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
+    memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
     kip->slot_used[0] = SLOT_USED;
     kip->nused = 1;
     kip->ngarbage = 0;
+    list_add(&kip->list, &c->pages);
     return kip->insns;
 }
 
+
 kprobe_opcode_t __kprobes *get_insn_slot(void)
 {
-    kprobe_opcode_t *ret;
+    kprobe_opcode_t *ret = NULL;
+
     mutex_lock(&kprobe_insn_mutex);
-    ret = __get_insn_slot();
+    ret = __get_insn_slot(&kprobe_insn_slots);
     mutex_unlock(&kprobe_insn_mutex);
+
     return ret;
 }
 
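get_insn_slot() and free_insn_slot() keep their original signatures, so arch code is untouched by the cache refactoring; only the backing store changed. A hedged sketch of the call pattern an architecture port follows (function names here are illustrative; on x86 the real callers are arch_prepare_kprobe() and arch_remove_kprobe()):

    /* Sketch only: copy the probed instruction into an executable slot
     * for single-stepping out of line, release the slot on removal. */
    static int prepare_probe(struct kprobe *p)
    {
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn)
            return -ENOMEM;
        memcpy(p->ainsn.insn, p->addr,
               MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        return 0;
    }

    static void remove_probe(struct kprobe *p)
    {
        if (p->ainsn.insn) {
            /* dirty = 1: another CPU may still be stepping in the slot,
             * so it is marked SLOT_DIRTY and reclaimed only after a
             * quiescent period in collect_garbage_slots(). */
            free_insn_slot(p->ainsn.insn, 1);
            p->ainsn.insn = NULL;
        }
    }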
@@ -218,7 +221,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
      * so as not to have to set it up again the
      * next time somebody inserts a probe.
      */
-    if (!list_is_singular(&kprobe_insn_pages)) {
+    if (!list_is_singular(&kip->list)) {
         list_del(&kip->list);
         module_free(NULL, kip->insns);
         kfree(kip);
@@ -228,52 +231,85 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
     return 0;
 }
 
-static int __kprobes collect_garbage_slots(void)
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 {
     struct kprobe_insn_page *kip, *next;
 
-    /* Ensure no-one is preepmted on the garbages */
-    if (check_safety())
-        return -EAGAIN;
+    /* Ensure no-one is interrupted on the garbages */
+    synchronize_sched();
 
-    list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
+    list_for_each_entry_safe(kip, next, &c->pages, list) {
         int i;
         if (kip->ngarbage == 0)
             continue;
         kip->ngarbage = 0;    /* we will collect all garbages */
-        for (i = 0; i < INSNS_PER_PAGE; i++) {
+        for (i = 0; i < slots_per_page(c); i++) {
             if (kip->slot_used[i] == SLOT_DIRTY &&
                 collect_one_slot(kip, i))
                 break;
         }
     }
-    kprobe_garbage_slots = 0;
+    c->nr_garbage = 0;
     return 0;
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+                                       kprobe_opcode_t *slot, int dirty)
 {
     struct kprobe_insn_page *kip;
 
-    mutex_lock(&kprobe_insn_mutex);
-    list_for_each_entry(kip, &kprobe_insn_pages, list) {
-        if (kip->insns <= slot &&
-            slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
-            int i = (slot - kip->insns) / MAX_INSN_SIZE;
+    list_for_each_entry(kip, &c->pages, list) {
+        long idx = ((long)slot - (long)kip->insns) /
+                (c->insn_size * sizeof(kprobe_opcode_t));
+        if (idx >= 0 && idx < slots_per_page(c)) {
+            WARN_ON(kip->slot_used[idx] != SLOT_USED);
             if (dirty) {
-                kip->slot_used[i] = SLOT_DIRTY;
+                kip->slot_used[idx] = SLOT_DIRTY;
                 kip->ngarbage++;
+                if (++c->nr_garbage > slots_per_page(c))
+                    collect_garbage_slots(c);
             } else
-                collect_one_slot(kip, i);
-            break;
+                collect_one_slot(kip, idx);
+            return;
         }
     }
+    /* Could not free this slot. */
+    WARN_ON(1);
+}
 
-    if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
-        collect_garbage_slots();
-
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+{
+    mutex_lock(&kprobe_insn_mutex);
+    __free_insn_slot(&kprobe_insn_slots, slot, dirty);
     mutex_unlock(&kprobe_insn_mutex);
 }
+#ifdef CONFIG_OPTPROBES
+/* For optimized_kprobe buffer */
+static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
+static struct kprobe_insn_cache kprobe_optinsn_slots = {
+    .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
+    /* .insn_size is initialized later */
+    .nr_garbage = 0,
+};
+/* Get a slot for optimized_kprobe buffer */
+kprobe_opcode_t __kprobes *get_optinsn_slot(void)
+{
+    kprobe_opcode_t *ret = NULL;
+
+    mutex_lock(&kprobe_optinsn_mutex);
+    ret = __get_insn_slot(&kprobe_optinsn_slots);
+    mutex_unlock(&kprobe_optinsn_mutex);
+
+    return ret;
+}
+
+void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
+{
+    mutex_lock(&kprobe_optinsn_mutex);
+    __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
+    mutex_unlock(&kprobe_optinsn_mutex);
+}
+#endif
 #endif
 
 /* We have preemption disabled.. so it is safe to use __ versions */
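The rewritten __free_insn_slot() no longer range-checks `slot` against [insns, insns + page); it divides the byte offset by the slot size and tests that the index lands in [0, slots_per_page(c)), so a pointer outside the page simply yields an out-of-range index. The same arithmetic in a standalone sketch (toy values, not kernel code):

    #include <stdio.h>

    /* Map an arbitrary pointer back to its slot index within a pool;
     * pointers outside the pool fall outside [0, nslots). */
    static long slot_index(const int *base, long slot_elems, long nslots,
                           const int *p)
    {
        long idx = (p - base) / slot_elems;
        return (idx >= 0 && idx < nslots) ? idx : -1;
    }

    int main(void)
    {
        int pool[4 * 16];    /* 4 slots of 16 ints each */
        printf("%ld\n", slot_index(pool, 16, 4, &pool[35]));  /* 2 */
        printf("%ld\n", slot_index(pool, 16, 4, pool + 100)); /* -1 */
        return 0;
    }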
@@ -304,23 +340,401 @@ struct kprobe __kprobes *get_kprobe(void *addr)
         if (p->addr == addr)
             return p;
     }
+
     return NULL;
 }
 
+static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+/* Return true if the kprobe is an aggregator */
+static inline int kprobe_aggrprobe(struct kprobe *p)
+{
+    return p->pre_handler == aggr_pre_handler;
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+    memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+    memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+#ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
+/*
+ * Call all pre_handler on the list, but ignores its return value.
+ * This must be called from arch-dep optimized caller.
+ */
+void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+    struct kprobe *kp;
+
+    list_for_each_entry_rcu(kp, &p->list, list) {
+        if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
+            set_kprobe_instance(kp);
+            kp->pre_handler(kp, regs);
+        }
+        reset_kprobe_instance();
+    }
+}
+
+/* Return true(!0) if the kprobe is ready for optimization. */
+static inline int kprobe_optready(struct kprobe *p)
+{
+    struct optimized_kprobe *op;
+
+    if (kprobe_aggrprobe(p)) {
+        op = container_of(p, struct optimized_kprobe, kp);
+        return arch_prepared_optinsn(&op->optinsn);
+    }
+
+    return 0;
+}
+
+/*
+ * Return an optimized kprobe whose optimizing code replaces
+ * instructions including addr (exclude breakpoint).
+ */
+struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+{
+    int i;
+    struct kprobe *p = NULL;
+    struct optimized_kprobe *op;
+
+    /* Don't check i == 0, since that is a breakpoint case. */
+    for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
+        p = get_kprobe((void *)(addr - i));
+
+    if (p && kprobe_optready(p)) {
+        op = container_of(p, struct optimized_kprobe, kp);
+        if (arch_within_optimized_kprobe(op, addr))
+            return p;
+    }
+
+    return NULL;
+}
+
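kprobe_optready() and get_optimized_kprobe() above lean on struct optimized_kprobe embedding its struct kprobe as member `kp`, so container_of() can walk back from the inner pointer to the outer object. A freestanding sketch of the idiom (toy types, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct probe { void *addr; };

    struct opt_probe {
        long detour;        /* extra state only the optimized form has */
        struct probe kp;    /* embedded base object */
    };

    int main(void)
    {
        struct opt_probe op = { .detour = 42 };
        struct probe *p = &op.kp;    /* code mostly passes the inner pointer */

        /* recover the enclosing opt_probe from the inner pointer */
        struct opt_probe *back = container_of(p, struct opt_probe, kp);
        printf("%ld\n", back->detour);    /* prints 42 */
        return 0;
    }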
+/* Optimization staging list, protected by kprobe_mutex */
+static LIST_HEAD(optimizing_list);
+
+static void kprobe_optimizer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+#define OPTIMIZE_DELAY 5
+
+/* Kprobe jump optimizer */
+static __kprobes void kprobe_optimizer(struct work_struct *work)
+{
+    struct optimized_kprobe *op, *tmp;
+
+    /* Lock modules while optimizing kprobes */
+    mutex_lock(&module_mutex);
+    mutex_lock(&kprobe_mutex);
+    if (kprobes_all_disarmed || !kprobes_allow_optimization)
+        goto end;
+
+    /*
+     * Wait for quiesence period to ensure all running interrupts
+     * are done. Because optprobe may modify multiple instructions
+     * there is a chance that Nth instruction is interrupted. In that
+     * case, running interrupt can return to 2nd-Nth byte of jump
+     * instruction. This wait is for avoiding it.
+     */
+    synchronize_sched();
+
+    /*
+     * The optimization/unoptimization refers online_cpus via
+     * stop_machine() and cpu-hotplug modifies online_cpus.
+     * And same time, text_mutex will be held in cpu-hotplug and here.
+     * This combination can cause a deadlock (cpu-hotplug try to lock
+     * text_mutex but stop_machine can not be done because online_cpus
+     * has been changed)
+     * To avoid this deadlock, we need to call get_online_cpus()
+     * for preventing cpu-hotplug outside of text_mutex locking.
+     */
+    get_online_cpus();
+    mutex_lock(&text_mutex);
+    list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
+        WARN_ON(kprobe_disabled(&op->kp));
+        if (arch_optimize_kprobe(op) < 0)
+            op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+        list_del_init(&op->list);
+    }
+    mutex_unlock(&text_mutex);
+    put_online_cpus();
+end:
+    mutex_unlock(&kprobe_mutex);
+    mutex_unlock(&module_mutex);
+}
+
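kprobe_optimizer() runs as delayed work so that bursts of optimize_kprobe() calls coalesce: each caller only appends to optimizing_list and arms the timer, and one worker pays the synchronize_sched() cost for the whole batch. The same pattern in isolation, sketched with the real workqueue API but illustrative names:

    #include <linux/workqueue.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    static LIST_HEAD(pending);            /* staged items */
    static DEFINE_MUTEX(pending_lock);

    static void batch_worker(struct work_struct *work);
    static DECLARE_DELAYED_WORK(batch_work, batch_worker);

    struct item { struct list_head list; };

    static void batch_worker(struct work_struct *work)
    {
        struct item *it, *tmp;

        mutex_lock(&pending_lock);
        list_for_each_entry_safe(it, tmp, &pending, list)
            list_del_init(&it->list);    /* handle the whole batch at once */
        mutex_unlock(&pending_lock);
    }

    static void queue_item(struct item *it)
    {
        mutex_lock(&pending_lock);
        list_add(&it->list, &pending);
        if (!delayed_work_pending(&batch_work))
            schedule_delayed_work(&batch_work, 5);    /* coalesce for 5 jiffies */
        mutex_unlock(&pending_lock);
    }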
+/* Optimize kprobe if p is ready to be optimized */
+static __kprobes void optimize_kprobe(struct kprobe *p)
+{
+    struct optimized_kprobe *op;
+
+    /* Check if the kprobe is disabled or not ready for optimization. */
+    if (!kprobe_optready(p) || !kprobes_allow_optimization ||
+        (kprobe_disabled(p) || kprobes_all_disarmed))
+        return;
+
+    /* Both of break_handler and post_handler are not supported. */
+    if (p->break_handler || p->post_handler)
+        return;
+
+    op = container_of(p, struct optimized_kprobe, kp);
+
+    /* Check there is no other kprobes at the optimized instructions */
+    if (arch_check_optimized_kprobe(op) < 0)
+        return;
+
+    /* Check if it is already optimized. */
+    if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+        return;
+
+    op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
+    list_add(&op->list, &optimizing_list);
+    if (!delayed_work_pending(&optimizing_work))
+        schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+}
+
+/* Unoptimize a kprobe if p is optimized */
+static __kprobes void unoptimize_kprobe(struct kprobe *p)
+{
+    struct optimized_kprobe *op;
+
+    if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
+        op = container_of(p, struct optimized_kprobe, kp);
+        if (!list_empty(&op->list))
+            /* Dequeue from the optimization queue */
+            list_del_init(&op->list);
+        else
+            /* Replace jump with break */
+            arch_unoptimize_kprobe(op);
+        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+    }
+}
+
+/* Remove optimized instructions */
+static void __kprobes kill_optimized_kprobe(struct kprobe *p)
+{
+    struct optimized_kprobe *op;
+
+    op = container_of(p, struct optimized_kprobe, kp);
+    if (!list_empty(&op->list)) {
+        /* Dequeue from the optimization queue */
+        list_del_init(&op->list);
+        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+    }
+    /* Don't unoptimize, because the target code will be freed. */
+    arch_remove_optimized_kprobe(op);
+}
+
+/* Try to prepare optimized instructions */
+static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
+{
+    struct optimized_kprobe *op;
+
+    op = container_of(p, struct optimized_kprobe, kp);
+    arch_prepare_optimized_kprobe(op);
+}
+
+/* Free optimized instructions and optimized_kprobe */
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+    struct optimized_kprobe *op;
+
+    op = container_of(p, struct optimized_kprobe, kp);
+    arch_remove_optimized_kprobe(op);
+    kfree(op);
+}
+
+/* Allocate new optimized_kprobe and try to prepare optimized instructions */
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+    struct optimized_kprobe *op;
+
+    op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
+    if (!op)
+        return NULL;
+
+    INIT_LIST_HEAD(&op->list);
+    op->kp.addr = p->addr;
+    arch_prepare_optimized_kprobe(op);
+
+    return &op->kp;
+}
+
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
+
+/*
+ * Prepare an optimized_kprobe and optimize it
+ * NOTE: p must be a normal registered kprobe
+ */
+static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
+{
+    struct kprobe *ap;
+    struct optimized_kprobe *op;
+
+    ap = alloc_aggr_kprobe(p);
+    if (!ap)
+        return;
+
+    op = container_of(ap, struct optimized_kprobe, kp);
+    if (!arch_prepared_optinsn(&op->optinsn)) {
+        /* If failed to setup optimizing, fallback to kprobe */
+        free_aggr_kprobe(ap);
+        return;
+    }
+
+    init_aggr_kprobe(ap, p);
+    optimize_kprobe(ap);
+}
+
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+    struct hlist_head *head;
+    struct hlist_node *node;
+    struct kprobe *p;
+    unsigned int i;
+
+    /* If optimization is already allowed, just return */
+    if (kprobes_allow_optimization)
+        return;
+
+    kprobes_allow_optimization = true;
+    mutex_lock(&text_mutex);
+    for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+        head = &kprobe_table[i];
+        hlist_for_each_entry_rcu(p, node, head, hlist)
+            if (!kprobe_disabled(p))
+                optimize_kprobe(p);
+    }
+    mutex_unlock(&text_mutex);
+    printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+    struct hlist_head *head;
+    struct hlist_node *node;
+    struct kprobe *p;
+    unsigned int i;
+
+    /* If optimization is already prohibited, just return */
+    if (!kprobes_allow_optimization)
+        return;
+
+    kprobes_allow_optimization = false;
+    printk(KERN_INFO "Kprobes globally unoptimized\n");
+    get_online_cpus();    /* For avoiding text_mutex deadlock */
+    mutex_lock(&text_mutex);
+    for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+        head = &kprobe_table[i];
+        hlist_for_each_entry_rcu(p, node, head, hlist) {
+            if (!kprobe_disabled(p))
+                unoptimize_kprobe(p);
+        }
+    }
+
+    mutex_unlock(&text_mutex);
+    put_online_cpus();
+    /* Allow all currently running kprobes to complete */
+    synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+                                      void __user *buffer, size_t *length,
+                                      loff_t *ppos)
+{
+    int ret;
+
+    mutex_lock(&kprobe_mutex);
+    sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+    ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+    if (sysctl_kprobes_optimization)
+        optimize_all_kprobes();
+    else
+        unoptimize_all_kprobes();
+    mutex_unlock(&kprobe_mutex);
+
+    return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
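proc_kprobes_optimization_handler() above is the two-way switch behind a sysctl knob; the matching table entry lives in kernel/sysctl.c and is not part of this hunk. The wiring is assumed here, sketched from how minmax-clamped debug sysctls are normally declared, rather than quoted from this diff:

    /* Assumed companion change in kernel/sysctl.c (not shown in this diff):
     * a 0/1 integer knob, range-checked by proc_dointvec_minmax(). */
    static int zero, one = 1;

    static struct ctl_table assumed_debug_table[] = {
        {
            .procname     = "kprobes-optimization",
            .data         = &sysctl_kprobes_optimization,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_kprobes_optimization_handler,
            .extra1       = &zero,    /* writes clamped to 0..1 */
            .extra2       = &one,
        },
        {}
    };

With that in place, writing 0 to the knob would fall every probe back to the breakpoint-based slow path, which is what unoptimize_all_kprobes() implements.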
+static void __kprobes __arm_kprobe(struct kprobe *p)
+{
+    struct kprobe *old_p;
+
+    /* Check collision with other optimized kprobes */
+    old_p = get_optimized_kprobe((unsigned long)p->addr);
+    if (unlikely(old_p))
+        unoptimize_kprobe(old_p);    /* Fallback to unoptimized kprobe */
+
+    arch_arm_kprobe(p);
+    optimize_kprobe(p);    /* Try to optimize (add kprobe to a list) */
+}
+
+static void __kprobes __disarm_kprobe(struct kprobe *p)
+{
+    struct kprobe *old_p;
+
+    unoptimize_kprobe(p);    /* Try to unoptimize */
+    arch_disarm_kprobe(p);
+
+    /* If another kprobe was blocked, optimize it. */
+    old_p = get_optimized_kprobe((unsigned long)p->addr);
+    if (unlikely(old_p))
+        optimize_kprobe(old_p);
+}
+
+#else /* !CONFIG_OPTPROBES */
+
+#define optimize_kprobe(p)            do {} while (0)
+#define unoptimize_kprobe(p)          do {} while (0)
+#define kill_optimized_kprobe(p)      do {} while (0)
+#define prepare_optimized_kprobe(p)   do {} while (0)
+#define try_to_optimize_kprobe(p)     do {} while (0)
+#define __arm_kprobe(p)               arch_arm_kprobe(p)
+#define __disarm_kprobe(p)            arch_disarm_kprobe(p)
+
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+    kfree(p);
+}
+
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+    return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+}
+#endif /* CONFIG_OPTPROBES */
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+    /*
+     * Here, since __arm_kprobe() doesn't use stop_machine(),
+     * this doesn't cause deadlock on text_mutex. So, we don't
+     * need get_online_cpus().
+     */
     mutex_lock(&text_mutex);
-    arch_arm_kprobe(kp);
+    __arm_kprobe(kp);
     mutex_unlock(&text_mutex);
 }
 
 /* Disarm a kprobe with text_mutex */
 static void __kprobes disarm_kprobe(struct kprobe *kp)
 {
+    get_online_cpus();    /* For avoiding text_mutex deadlock */
     mutex_lock(&text_mutex);
-    arch_disarm_kprobe(kp);
+    __disarm_kprobe(kp);
     mutex_unlock(&text_mutex);
+    put_online_cpus();
 }
 
 /*
@@ -389,7 +803,7 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 {
     struct kprobe *kp;
-    if (p->pre_handler != aggr_pre_handler) {
+    if (!kprobe_aggrprobe(p)) {
         p->nmissed++;
     } else {
         list_for_each_entry_rcu(kp, &p->list, list)
@@ -513,21 +927,16 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 }
 
 /*
- * Keep all fields in the kprobe consistent
- */
-static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
-{
-    memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
-    memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
-}
-
-/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
     BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
+
+    if (p->break_handler || p->post_handler)
+        unoptimize_kprobe(ap);    /* Fall back to normal kprobe */
+
     if (p->break_handler) {
         if (ap->break_handler)
             return -EEXIST;
@@ -542,7 +951,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
         ap->flags &= ~KPROBE_FLAG_DISABLED;
         if (!kprobes_all_disarmed)
             /* Arm the breakpoint again. */
-            arm_kprobe(ap);
+            __arm_kprobe(ap);
     }
     return 0;
 }
@@ -551,12 +960,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
-static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+    /* Copy p's insn slot to ap */
     copy_kprobe(p, ap);
     flush_insn_slot(ap);
     ap->addr = p->addr;
-    ap->flags = p->flags;
+    ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
     ap->pre_handler = aggr_pre_handler;
     ap->fault_handler = aggr_fault_handler;
     /* We don't care the kprobe which has gone. */
@@ -566,8 +976,9 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
     ap->break_handler = aggr_break_handler;
 
     INIT_LIST_HEAD(&ap->list);
-    list_add_rcu(&p->list, &ap->list);
+    INIT_HLIST_NODE(&ap->hlist);
 
+    list_add_rcu(&p->list, &ap->list);
     hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
@@ -581,12 +992,12 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
     int ret = 0;
     struct kprobe *ap = old_p;
 
-    if (old_p->pre_handler != aggr_pre_handler) {
-        /* If old_p is not an aggr_probe, create new aggr_kprobe. */
-        ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+    if (!kprobe_aggrprobe(old_p)) {
+        /* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
+        ap = alloc_aggr_kprobe(old_p);
         if (!ap)
             return -ENOMEM;
-        add_aggr_kprobe(ap, old_p);
+        init_aggr_kprobe(ap, old_p);
     }
 
     if (kprobe_gone(ap)) {
@@ -605,6 +1016,9 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
              */
             return ret;
 
+        /* Prepare optimized instructions if possible. */
+        prepare_optimized_kprobe(ap);
+
         /*
          * Clear gone flag to prevent allocating new slot again, and
          * set disabled flag because it is not armed yet.
@@ -613,6 +1027,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                 | KPROBE_FLAG_DISABLED;
     }
 
+    /* Copy ap's insn slot to p */
     copy_kprobe(ap, p);
     return add_new_kprobe(ap, p);
 }
@@ -673,6 +1088,40 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
     return (kprobe_opcode_t *)(((char *)addr) + p->offset);
 }
 
+/* Check passed kprobe is valid and return kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
+{
+    struct kprobe *old_p, *list_p;
+
+    old_p = get_kprobe(p->addr);
+    if (unlikely(!old_p))
+        return NULL;
+
+    if (p != old_p) {
+        list_for_each_entry_rcu(list_p, &old_p->list, list)
+            if (list_p == p)
+                /* kprobe p is a valid probe */
+                goto valid;
+        return NULL;
+    }
+valid:
+    return old_p;
+}
+
+/* Return error if the kprobe is being re-registered */
+static inline int check_kprobe_rereg(struct kprobe *p)
+{
+    int ret = 0;
+    struct kprobe *old_p;
+
+    mutex_lock(&kprobe_mutex);
+    old_p = __get_valid_kprobe(p);
+    if (old_p)
+        ret = -EINVAL;
+    mutex_unlock(&kprobe_mutex);
+    return ret;
+}
+
 int __kprobes register_kprobe(struct kprobe *p)
 {
     int ret = 0;
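check_kprobe_rereg() closes a long-standing hole: registering an already-live struct kprobe a second time used to double-link it into the hash list. A hypothetical module-side sketch of the new behavior (the probed symbol is illustrative):

    #include <linux/kprobes.h>

    static struct kprobe kp = {
        .symbol_name = "do_fork",    /* illustrative target */
    };

    static int __init rereg_demo_init(void)
    {
        int ret = register_kprobe(&kp);    /* first call: 0 on success */

        if (!ret)
            /* Re-registering the same object now fails fast with
             * -EINVAL instead of corrupting kprobe_table. */
            WARN_ON(register_kprobe(&kp) != -EINVAL);
        return ret;
    }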
@@ -685,9 +1134,14 @@ int __kprobes register_kprobe(struct kprobe *p)
         return -EINVAL;
     p->addr = addr;
 
+    ret = check_kprobe_rereg(p);
+    if (ret)
+        return ret;
+
     preempt_disable();
     if (!kernel_text_address((unsigned long) p->addr) ||
-        in_kprobes_functions((unsigned long) p->addr)) {
+        in_kprobes_functions((unsigned long) p->addr) ||
+        ftrace_text_reserved(p->addr, p->addr)) {
         preempt_enable();
         return -EINVAL;
     }
@@ -724,27 +1178,34 @@ int __kprobes register_kprobe(struct kprobe *p)
     p->nmissed = 0;
     INIT_LIST_HEAD(&p->list);
     mutex_lock(&kprobe_mutex);
+
+    get_online_cpus();    /* For avoiding text_mutex deadlock. */
+    mutex_lock(&text_mutex);
+
     old_p = get_kprobe(p->addr);
     if (old_p) {
+        /* Since this may unoptimize old_p, locking text_mutex. */
         ret = register_aggr_kprobe(old_p, p);
         goto out;
     }
 
-    mutex_lock(&text_mutex);
     ret = arch_prepare_kprobe(p);
     if (ret)
-        goto out_unlock_text;
+        goto out;
 
     INIT_HLIST_NODE(&p->hlist);
     hlist_add_head_rcu(&p->hlist,
                &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
     if (!kprobes_all_disarmed && !kprobe_disabled(p))
-        arch_arm_kprobe(p);
+        __arm_kprobe(p);
+
+    /* Try to optimize kprobe */
+    try_to_optimize_kprobe(p);
 
-out_unlock_text:
-    mutex_unlock(&text_mutex);
 out:
+    mutex_unlock(&text_mutex);
+    put_online_cpus();
     mutex_unlock(&kprobe_mutex);
 
     if (probed_mod)
@@ -754,26 +1215,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
-/* Check passed kprobe is valid and return kprobe in kprobe_table. */
-static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
-{
-    struct kprobe *old_p, *list_p;
-
-    old_p = get_kprobe(p->addr);
-    if (unlikely(!old_p))
-        return NULL;
-
-    if (p != old_p) {
-        list_for_each_entry_rcu(list_p, &old_p->list, list)
-            if (list_p == p)
-                /* kprobe p is a valid probe */
-                goto valid;
-        return NULL;
-    }
-valid:
-    return old_p;
-}
-
 /*
  * Unregister a kprobe without a scheduler synchronization.
  */
@@ -786,7 +1227,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
         return -EINVAL;
 
     if (old_p == p ||
-        (old_p->pre_handler == aggr_pre_handler &&
+        (kprobe_aggrprobe(old_p) &&
          list_is_singular(&old_p->list))) {
         /*
          * Only probe on the hash list. Disarm only if kprobes are
@@ -794,7 +1235,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
          * already have been removed. We save on flushing icache.
          */
         if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
-            disarm_kprobe(p);
+            disarm_kprobe(old_p);
         hlist_del_rcu(&old_p->hlist);
     } else {
         if (p->break_handler && !kprobe_gone(p))
@@ -810,8 +1251,13 @@ noclean:
         list_del_rcu(&p->list);
         if (!kprobe_disabled(old_p)) {
             try_to_disable_aggr_kprobe(old_p);
-            if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-                disarm_kprobe(old_p);
+            if (!kprobes_all_disarmed) {
+                if (kprobe_disabled(old_p))
+                    disarm_kprobe(old_p);
+                else
+                    /* Try to optimize this probe again */
+                    optimize_kprobe(old_p);
+            }
         }
     }
     return 0;
@@ -828,7 +1274,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
         old_p = list_entry(p->list.next, struct kprobe, list);
         list_del(&p->list);
         arch_remove_kprobe(old_p);
-        kfree(old_p);
+        free_aggr_kprobe(old_p);
     }
 }
 
@@ -1014,9 +1460,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
     /* Pre-allocate memory for max kretprobe instances */
     if (rp->maxactive <= 0) {
 #ifdef CONFIG_PREEMPT
-        rp->maxactive = max(10, 2 * NR_CPUS);
+        rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
 #else
-        rp->maxactive = NR_CPUS;
+        rp->maxactive = num_possible_cpus();
 #endif
     }
     spin_lock_init(&rp->lock);
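The max() to max_t() change in this hunk is about type checking, not behavior: the kernel's max() macro refuses operands of different types, and the literal 10 (an int) mixed with an unsigned CPU count triggers a "comparison of distinct pointer types" warning. A compressed view of the mechanism (macro body paraphrased from include/linux/kernel.h of that era):

    /* max() captures both operands with typeof() and compares their
     * addresses purely to make the compiler warn when types differ:
     *
     *   #define max(x, y) ({               \
     *       typeof(x) _max1 = (x);         \
     *       typeof(y) _max2 = (y);         \
     *       (void) (&_max1 == &_max2);     \   <-- warns on mixed types
     *       _max1 > _max2 ? _max1 : _max2; })
     *
     * max_t() casts both sides to one explicit type first, so this is clean: */
    rp->maxactive = max_t(unsigned int, 10, 2 * num_possible_cpus());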
@@ -1124,7 +1570,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
     struct kprobe *kp;
 
     p->flags |= KPROBE_FLAG_GONE;
-    if (p->pre_handler == aggr_pre_handler) {
+    if (kprobe_aggrprobe(p)) {
         /*
          * If this is an aggr_kprobe, we have to list all the
          * chained probes and mark them GONE.
@@ -1133,6 +1579,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
             kp->flags |= KPROBE_FLAG_GONE;
         p->post_handler = NULL;
         p->break_handler = NULL;
+        kill_optimized_kprobe(p);
     }
     /*
      * Here, we can remove insn_slot safely, because no thread calls
@@ -1141,6 +1588,13 @@ static void __kprobes kill_kprobe(struct kprobe *p)
     arch_remove_kprobe(p);
 }
 
+void __kprobes dump_kprobe(struct kprobe *kp)
+{
+    printk(KERN_WARNING "Dumping kprobe:\n");
+    printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
+           kp->symbol_name, kp->addr, kp->offset);
+}
+
 /* Module notifier call back, checking kprobes on the module */
 static int __kprobes kprobes_module_callback(struct notifier_block *nb,
                                              unsigned long val, void *data)
@@ -1235,6 +1689,15 @@ static int __init init_kprobes(void)
         }
     }
 
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+    /* Init kprobe_optinsn_slots */
+    kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+    /* By default, kprobes can be optimized */
+    kprobes_allow_optimization = true;
+#endif
+
     /* By default, kprobes are armed */
     kprobes_all_disarmed = false;
 
@@ -1253,7 +1716,7 @@ static int __init init_kprobes(void)
 
 #ifdef CONFIG_DEBUG_FS
 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
-        const char *sym, int offset,char *modname)
+        const char *sym, int offset, char *modname, struct kprobe *pp)
 {
     char *kprobe_type;
 
@@ -1263,19 +1726,21 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
         kprobe_type = "j";
     else
         kprobe_type = "k";
+
     if (sym)
-        seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
+        seq_printf(pi, "%p  %s  %s+0x%x  %s ",
             p->addr, kprobe_type, sym, offset,
-            (modname ? modname : " "),
-            (kprobe_gone(p) ? "[GONE]" : ""),
-            ((kprobe_disabled(p) && !kprobe_gone(p)) ?
-             "[DISABLED]" : ""));
+            (modname ? modname : " "));
     else
-        seq_printf(pi, "%p  %s  %p %s%s\n",
-            p->addr, kprobe_type, p->addr,
-            (kprobe_gone(p) ? "[GONE]" : ""),
-            ((kprobe_disabled(p) && !kprobe_gone(p)) ?
-             "[DISABLED]" : ""));
+        seq_printf(pi, "%p  %s  %p ",
+            p->addr, kprobe_type, p->addr);
+
+    if (!pp)
+        pp = p;
+    seq_printf(pi, "%s%s%s\n",
+        (kprobe_gone(p) ? "[GONE]" : ""),
+        ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
+        (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1311,11 +1776,11 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
     hlist_for_each_entry_rcu(p, node, head, hlist) {
         sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                     &offset, &modname, namebuf);
-        if (p->pre_handler == aggr_pre_handler) {
+        if (kprobe_aggrprobe(p)) {
             list_for_each_entry_rcu(kp, &p->list, list)
-                report_probe(pi, kp, sym, offset, modname);
+                report_probe(pi, kp, sym, offset, modname, p);
         } else
-            report_probe(pi, p, sym, offset, modname);
+            report_probe(pi, p, sym, offset, modname, NULL);
     }
     preempt_enable();
     return 0;
@@ -1393,12 +1858,13 @@ int __kprobes enable_kprobe(struct kprobe *kp)
         goto out;
     }
 
-    if (!kprobes_all_disarmed && kprobe_disabled(p))
-        arm_kprobe(p);
-
-    p->flags &= ~KPROBE_FLAG_DISABLED;
     if (p != kp)
         kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+    if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+        p->flags &= ~KPROBE_FLAG_DISABLED;
+        arm_kprobe(p);
+    }
 out:
     mutex_unlock(&kprobe_mutex);
     return ret;
@@ -1418,12 +1884,13 @@ static void __kprobes arm_all_kprobes(void)
     if (!kprobes_all_disarmed)
         goto already_enabled;
 
+    /* Arming kprobes doesn't optimize kprobe itself */
     mutex_lock(&text_mutex);
     for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
         head = &kprobe_table[i];
         hlist_for_each_entry_rcu(p, node, head, hlist)
             if (!kprobe_disabled(p))
-                arch_arm_kprobe(p);
+                __arm_kprobe(p);
     }
     mutex_unlock(&text_mutex);
 
@@ -1450,16 +1917,23 @@ static void __kprobes disarm_all_kprobes(void)
 
     kprobes_all_disarmed = true;
     printk(KERN_INFO "Kprobes globally disabled\n");
+
+    /*
+     * Here we call get_online_cpus() for avoiding text_mutex deadlock,
+     * because disarming may also unoptimize kprobes.
+     */
+    get_online_cpus();
     mutex_lock(&text_mutex);
     for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
         head = &kprobe_table[i];
         hlist_for_each_entry_rcu(p, node, head, hlist) {
             if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-                arch_disarm_kprobe(p);
+                __disarm_kprobe(p);
         }
     }
 
     mutex_unlock(&text_mutex);
+    put_online_cpus();
     mutex_unlock(&kprobe_mutex);
     /* Allow all currently running kprobes to complete */
     synchronize_sched();