author    Ingo Molnar <mingo@elte.hu>  2012-02-17 03:27:41 -0500
committer Ingo Molnar <mingo@elte.hu>  2012-02-17 04:18:07 -0500
commit    7b2d81d48a2d8e37efb6ce7b4d5ef58822b30d89 (patch)
tree      23987f194dcd91b0ba6d27f7f6e08c178797488e /kernel
parent    2b144498350860b6ee9dc57ff27a93ad488de5dc (diff)
uprobes/core: Clean up, refactor and improve the code
Make the uprobes code readable to me:

 - improve the Kconfig text so that a mere mortal gets some idea what
   CONFIG_UPROBES=y is really about

 - do trivial renames to standardize around the uprobes_*() namespace

 - clean up and simplify various code flow details

 - separate basic blocks of functionality

 - line break artifact and white space related removal

 - use standard local varible definition blocks

 - use vertical spacing to make things more readable

 - remove unnecessary volatile

 - restructure comment blocks to make them more uniform and more readable
   in general

Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Anton Arapov <anton@redhat.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Link: http://lkml.kernel.org/n/tip-ewbwhb8o6navvllsauu7k07p@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/uprobes.c  219
1 files changed, 127 insertions, 92 deletions
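One construct in this patch worth a closer look is the pair of hash macros it reflows onto single lines, uprobes_hash() and uprobes_mmap_hash(). The pattern is hashed bucket locking: instead of one global mutex, the (un)register paths hash the inode pointer into a small fixed array of mutexes, so unrelated inodes rarely contend while the lock table stays a constant 13 entries. A minimal user-space sketch of the same idea, assuming POSIX threads (all names below are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdio.h>

    #define HASH_SZ 13      /* small prime, like UPROBES_HASH_SZ */

    static pthread_mutex_t bucket_mutex[HASH_SZ];

    /* hypothetical analogue of uprobes_hash(): map a pointer to its bucket lock */
    static pthread_mutex_t *bucket_lock(const void *obj)
    {
            return &bucket_mutex[(unsigned long)obj % HASH_SZ];
    }

    struct object { int registered; };

    static void object_register(struct object *obj)
    {
            pthread_mutex_lock(bucket_lock(obj));   /* serializes per bucket */
            obj->registered = 1;                    /* the protected update */
            pthread_mutex_unlock(bucket_lock(obj));
    }

    int main(void)
    {
            struct object obj = { 0 };
            int i;

            for (i = 0; i < HASH_SZ; i++)
                    pthread_mutex_init(&bucket_mutex[i], NULL);

            object_register(&obj);
            printf("registered: %d\n", obj.registered);
            return 0;
    }

Two objects that hash to the same bucket simply share a lock; that is harmless for correctness and keeps memory bounded, which is why a small prime suffices.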
diff --git a/kernel/uprobes.c b/kernel/uprobes.c
index 72e8bb3b52cd..884817f1b0d3 100644
--- a/kernel/uprobes.c
+++ b/kernel/uprobes.c
@@ -1,5 +1,5 @@
 /*
- * Userspace Probes (UProbes)
+ * User-space Probes (UProbes)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -29,24 +29,26 @@
 #include <linux/rmap.h>         /* anon_vma_prepare */
 #include <linux/mmu_notifier.h> /* set_pte_at_notify */
 #include <linux/swap.h>         /* try_to_free_swap */
+
 #include <linux/uprobes.h>
 
 static struct rb_root uprobes_tree = RB_ROOT;
+
 static DEFINE_SPINLOCK(uprobes_treelock);       /* serialize rbtree access */
 
 #define UPROBES_HASH_SZ 13
+
 /* serialize (un)register */
 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
-#define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) %\
-                        UPROBES_HASH_SZ])
+
+#define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
 
 /* serialize uprobe->pending_list */
 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
-#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) %\
-                        UPROBES_HASH_SZ])
+#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
 
 /*
- * uprobe_events allows us to skip the mmap_uprobe if there are no uprobe
+ * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
  * events active at this time. Probably a fine grained per inode count is
  * better?
  */
@@ -58,9 +60,9 @@ static atomic_t uprobe_events = ATOMIC_INIT(0);
  * vm_area_struct wasnt recommended.
  */
 struct vma_info {
-        struct list_head probe_list;
-        struct mm_struct *mm;
-        loff_t vaddr;
+        struct list_head        probe_list;
+        struct mm_struct        *mm;
+        loff_t                  vaddr;
 };
 
 /*
@@ -79,8 +81,7 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register)
         if (!is_register)
                 return true;
 
-        if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) ==
-                                                (VM_READ|VM_EXEC))
+        if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
                 return true;
 
         return false;
@@ -92,6 +93,7 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
 
         vaddr = vma->vm_start + offset;
         vaddr -= vma->vm_pgoff << PAGE_SHIFT;
+
         return vaddr;
 }
 
@@ -105,8 +107,7 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
  *
  * Returns 0 on success, -EFAULT on failure.
  */
-static int __replace_page(struct vm_area_struct *vma, struct page *page,
-                                        struct page *kpage)
+static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
 {
         struct mm_struct *mm = vma->vm_mm;
         pgd_t *pgd;
@@ -163,7 +164,7 @@ out:
  */
 bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 {
-        return (*insn == UPROBES_BKPT_INSN);
+        return *insn == UPROBES_BKPT_INSN;
 }
 
 /*
@@ -203,6 +204,7 @@ static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
         ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
         if (ret <= 0)
                 return ret;
+
         ret = -EINVAL;
 
         /*
@@ -239,6 +241,7 @@ static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
         vaddr_new = kmap_atomic(new_page);
 
         memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
+
         /* poke the new insn in, ASSUMES we don't cross page boundary */
         vaddr &= ~PAGE_MASK;
         BUG_ON(vaddr + uprobe_opcode_sz > PAGE_SIZE);
@@ -260,7 +263,8 @@ unlock_out:
         page_cache_release(new_page);
 
 put_out:
-        put_page(old_page); /* we did a get_page in the beginning */
+        put_page(old_page);
+
         return ret;
 }
 
@@ -276,8 +280,7 @@ put_out:
  * For mm @mm, read the opcode at @vaddr and store it in @opcode.
  * Return 0 (success) or a negative errno.
  */
-static int read_opcode(struct mm_struct *mm, unsigned long vaddr,
-                                                uprobe_opcode_t *opcode)
+static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
 {
         struct page *page;
         void *vaddr_new;
@@ -293,15 +296,18 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr,
         memcpy(opcode, vaddr_new + vaddr, uprobe_opcode_sz);
         kunmap_atomic(vaddr_new);
         unlock_page(page);
-        put_page(page); /* we did a get_user_pages in the beginning */
+
+        put_page(page);
+
         return 0;
 }
 
 static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
 {
         uprobe_opcode_t opcode;
-        int result = read_opcode(mm, vaddr, &opcode);
+        int result;
 
+        result = read_opcode(mm, vaddr, &opcode);
         if (result)
                 return result;
 
@@ -320,11 +326,11 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
  * For mm @mm, store the breakpoint instruction at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe,
-                                                unsigned long vaddr)
+int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr)
 {
-        int result = is_bkpt_at_addr(mm, vaddr);
+        int result;
 
+        result = is_bkpt_at_addr(mm, vaddr);
         if (result == 1)
                 return -EEXIST;
 
@@ -344,35 +350,35 @@ int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe,
  * For mm @mm, restore the original opcode (opcode) at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe,
-                                        unsigned long vaddr, bool verify)
+int __weak
+set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr, bool verify)
 {
         if (verify) {
-                int result = is_bkpt_at_addr(mm, vaddr);
+                int result;
 
+                result = is_bkpt_at_addr(mm, vaddr);
                 if (!result)
                         return -EINVAL;
 
                 if (result != 1)
                         return result;
         }
-        return write_opcode(mm, uprobe, vaddr,
-                                        *(uprobe_opcode_t *)uprobe->insn);
+        return write_opcode(mm, uprobe, vaddr, *(uprobe_opcode_t *)uprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
 {
         if (l->inode < r->inode)
                 return -1;
+
         if (l->inode > r->inode)
                 return 1;
-        else {
-                if (l->offset < r->offset)
-                        return -1;
 
-                if (l->offset > r->offset)
-                        return 1;
-        }
+        if (l->offset < r->offset)
+                return -1;
+
+        if (l->offset > r->offset)
+                return 1;
 
         return 0;
 }
@@ -391,6 +397,7 @@ static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
                         atomic_inc(&uprobe->ref);
                         return uprobe;
                 }
+
                 if (match < 0)
                         n = n->rb_left;
                 else
@@ -411,6 +418,7 @@ static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
         spin_lock_irqsave(&uprobes_treelock, flags);
         uprobe = __find_uprobe(inode, offset);
         spin_unlock_irqrestore(&uprobes_treelock, flags);
+
         return uprobe;
 }
 
@@ -436,16 +444,18 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
                         p = &parent->rb_right;
 
         }
+
         u = NULL;
         rb_link_node(&uprobe->rb_node, parent, p);
         rb_insert_color(&uprobe->rb_node, &uprobes_tree);
         /* get access + creation ref */
         atomic_set(&uprobe->ref, 2);
+
         return u;
 }
 
 /*
- * Acquires uprobes_treelock.
+ * Acquire uprobes_treelock.
  * Matching uprobe already exists in rbtree;
  * increment (access refcount) and return the matching uprobe.
  *
@@ -460,6 +470,7 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
         spin_lock_irqsave(&uprobes_treelock, flags);
         u = __insert_uprobe(uprobe);
         spin_unlock_irqrestore(&uprobes_treelock, flags);
+
         return u;
 }
 
@@ -490,19 +501,22 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
                 kfree(uprobe);
                 uprobe = cur_uprobe;
                 iput(inode);
-        } else
+        } else {
                 atomic_inc(&uprobe_events);
+        }
+
         return uprobe;
 }
 
 /* Returns the previous consumer */
-static struct uprobe_consumer *add_consumer(struct uprobe *uprobe,
-                                struct uprobe_consumer *consumer)
+static struct uprobe_consumer *
+consumer_add(struct uprobe *uprobe, struct uprobe_consumer *consumer)
 {
         down_write(&uprobe->consumer_rwsem);
         consumer->next = uprobe->consumers;
         uprobe->consumers = consumer;
         up_write(&uprobe->consumer_rwsem);
+
         return consumer->next;
 }
 
@@ -511,8 +525,7 @@ static struct uprobe_consumer *add_consumer(struct uprobe *uprobe,
  * Return true if the @consumer is deleted successfully
  * or return false.
  */
-static bool del_consumer(struct uprobe *uprobe,
-                                struct uprobe_consumer *consumer)
+static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *consumer)
 {
         struct uprobe_consumer **con;
         bool ret = false;
@@ -526,6 +539,7 @@ static bool del_consumer(struct uprobe *uprobe,
                 }
         }
         up_write(&uprobe->consumer_rwsem);
+
         return ret;
 }
 
@@ -557,15 +571,15 @@ static int __copy_insn(struct address_space *mapping,
         memcpy(insn, vaddr + off1, nbytes);
         kunmap_atomic(vaddr);
         page_cache_release(page);
+
         return 0;
 }
 
-static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma,
-                                        unsigned long addr)
+static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 {
         struct address_space *mapping;
-        int bytes;
         unsigned long nbytes;
+        int bytes;
 
         addr &= ~PAGE_MASK;
         nbytes = PAGE_SIZE - addr;
@@ -605,6 +619,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
                 return -EEXIST;
 
         addr = (unsigned long)vaddr;
+
         if (!(uprobe->flags & UPROBES_COPY_INSN)) {
                 ret = copy_insn(uprobe, vma, addr);
                 if (ret)
@@ -613,7 +628,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
                 if (is_bkpt_insn((uprobe_opcode_t *)uprobe->insn))
                         return -EEXIST;
 
-                ret = analyze_insn(mm, uprobe);
+                ret = arch_uprobes_analyze_insn(mm, uprobe);
                 if (ret)
                         return ret;
 
@@ -624,8 +639,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
         return ret;
 }
 
-static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
-                                                        loff_t vaddr)
+static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe, loff_t vaddr)
 {
         set_orig_insn(mm, uprobe, (unsigned long)vaddr, true);
 }
@@ -649,9 +663,11 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
         struct prio_tree_iter iter;
         struct vm_area_struct *vma;
         struct vma_info *tmpvi;
-        loff_t vaddr;
-        unsigned long pgoff = offset >> PAGE_SHIFT;
+        unsigned long pgoff;
         int existing_vma;
+        loff_t vaddr;
+
+        pgoff = offset >> PAGE_SHIFT;
 
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                 if (!valid_vma(vma, is_register))
@@ -659,6 +675,7 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
 
                 existing_vma = 0;
                 vaddr = vma_address(vma, offset);
+
                 list_for_each_entry(tmpvi, head, probe_list) {
                         if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
                                 existing_vma = 1;
@@ -670,14 +687,15 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
                  * Another vma needs a probe to be installed. However skip
                  * installing the probe if the vma is about to be unlinked.
                  */
-                if (!existing_vma &&
-                                atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
+                if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
                         vi->mm = vma->vm_mm;
                         vi->vaddr = vaddr;
                         list_add(&vi->probe_list, head);
+
                         return vi;
                 }
         }
+
         return NULL;
 }
 
@@ -685,11 +703,12 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
  * Iterate in the rmap prio tree and find a vma where a probe has not
  * yet been inserted.
  */
-static struct vma_info *find_next_vma_info(struct list_head *head,
-                        loff_t offset, struct address_space *mapping,
-                        bool is_register)
+static struct vma_info *
+find_next_vma_info(struct list_head *head, loff_t offset, struct address_space *mapping,
+                   bool is_register)
 {
         struct vma_info *vi, *retvi;
+
         vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
         if (!vi)
                 return ERR_PTR(-ENOMEM);
@@ -700,6 +719,7 @@ static struct vma_info *find_next_vma_info(struct list_head *head,
 
         if (!retvi)
                 kfree(vi);
+
         return retvi;
 }
 
@@ -711,16 +731,23 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
         struct vma_info *vi, *tmpvi;
         struct mm_struct *mm;
         loff_t vaddr;
-        int ret = 0;
+        int ret;
 
         mapping = uprobe->inode->i_mapping;
         INIT_LIST_HEAD(&try_list);
-        while ((vi = find_next_vma_info(&try_list, uprobe->offset,
-                                        mapping, is_register)) != NULL) {
+
+        ret = 0;
+
+        for (;;) {
+                vi = find_next_vma_info(&try_list, uprobe->offset, mapping, is_register);
+                if (!vi)
+                        break;
+
                 if (IS_ERR(vi)) {
                         ret = PTR_ERR(vi);
                         break;
                 }
+
                 mm = vi->mm;
                 down_read(&mm->mmap_sem);
                 vma = find_vma(mm, (unsigned long)vi->vaddr);
@@ -755,19 +782,21 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
                         break;
                 }
         }
+
         list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
                 list_del(&vi->probe_list);
                 kfree(vi);
         }
+
         return ret;
 }
 
-static int __register_uprobe(struct uprobe *uprobe)
+static int __uprobe_register(struct uprobe *uprobe)
 {
         return register_for_each_vma(uprobe, true);
 }
 
-static void __unregister_uprobe(struct uprobe *uprobe)
+static void __uprobe_unregister(struct uprobe *uprobe)
 {
         if (!register_for_each_vma(uprobe, false))
                 delete_uprobe(uprobe);
@@ -776,15 +805,15 @@ static void __unregister_uprobe(struct uprobe *uprobe)
 }
 
 /*
- * register_uprobe - register a probe
+ * uprobe_register - register a probe
  * @inode: the file in which the probe has to be placed.
  * @offset: offset from the start of the file.
  * @consumer: information on howto handle the probe..
  *
- * Apart from the access refcount, register_uprobe() takes a creation
+ * Apart from the access refcount, uprobe_register() takes a creation
  * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
  * inserted into the rbtree (i.e first consumer for a @inode:@offset
- * tuple). Creation refcount stops unregister_uprobe from freeing the
+ * tuple). Creation refcount stops uprobe_unregister from freeing the
  * @uprobe even before the register operation is complete. Creation
  * refcount is released when the last @consumer for the @uprobe
  * unregisters.
@@ -792,28 +821,29 @@ static void __unregister_uprobe(struct uprobe *uprobe)
  * Return errno if it cannot successully install probes
  * else return 0 (success)
  */
-int register_uprobe(struct inode *inode, loff_t offset,
-                                struct uprobe_consumer *consumer)
+int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
 {
         struct uprobe *uprobe;
-        int ret = -EINVAL;
+        int ret;
 
         if (!inode || !consumer || consumer->next)
-                return ret;
+                return -EINVAL;
 
         if (offset > i_size_read(inode))
-                return ret;
+                return -EINVAL;
 
         ret = 0;
         mutex_lock(uprobes_hash(inode));
         uprobe = alloc_uprobe(inode, offset);
-        if (uprobe && !add_consumer(uprobe, consumer)) {
-                ret = __register_uprobe(uprobe);
+
+        if (uprobe && !consumer_add(uprobe, consumer)) {
+                ret = __uprobe_register(uprobe);
                 if (ret) {
                         uprobe->consumers = NULL;
-                        __unregister_uprobe(uprobe);
-                } else
+                        __uprobe_unregister(uprobe);
+                } else {
                         uprobe->flags |= UPROBES_RUN_HANDLER;
+                }
         }
 
         mutex_unlock(uprobes_hash(inode));
@@ -823,15 +853,14 @@ int register_uprobe(struct inode *inode, loff_t offset,
 }
 
 /*
- * unregister_uprobe - unregister a already registered probe.
+ * uprobe_unregister - unregister a already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
  * @consumer: identify which probe if multiple probes are colocated.
  */
-void unregister_uprobe(struct inode *inode, loff_t offset,
-                                struct uprobe_consumer *consumer)
+void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
 {
-        struct uprobe *uprobe = NULL;
+        struct uprobe *uprobe;
 
         if (!inode || !consumer)
                 return;
@@ -841,15 +870,14 @@ void unregister_uprobe(struct inode *inode, loff_t offset,
                 return;
 
         mutex_lock(uprobes_hash(inode));
-        if (!del_consumer(uprobe, consumer))
-                goto unreg_out;
 
-        if (!uprobe->consumers) {
-                __unregister_uprobe(uprobe);
-                uprobe->flags &= ~UPROBES_RUN_HANDLER;
+        if (consumer_del(uprobe, consumer)) {
+                if (!uprobe->consumers) {
+                        __uprobe_unregister(uprobe);
+                        uprobe->flags &= ~UPROBES_RUN_HANDLER;
+                }
         }
 
-unreg_out:
         mutex_unlock(uprobes_hash(inode));
         if (uprobe)
                 put_uprobe(uprobe);
@@ -870,6 +898,7 @@ static struct rb_node *find_least_offset_node(struct inode *inode)
         while (n) {
                 uprobe = rb_entry(n, struct uprobe, rb_node);
                 match = match_uprobe(&u, uprobe);
+
                 if (uprobe->inode == inode)
                         close_node = n;
 
@@ -881,6 +910,7 @@ static struct rb_node *find_least_offset_node(struct inode *inode)
                 else
                         n = n->rb_right;
         }
+
         return close_node;
 }
 
@@ -890,11 +920,13 @@ static struct rb_node *find_least_offset_node(struct inode *inode)
 static void build_probe_list(struct inode *inode, struct list_head *head)
 {
         struct uprobe *uprobe;
-        struct rb_node *n;
         unsigned long flags;
+        struct rb_node *n;
 
         spin_lock_irqsave(&uprobes_treelock, flags);
+
         n = find_least_offset_node(inode);
+
         for (; n; n = rb_next(n)) {
                 uprobe = rb_entry(n, struct uprobe, rb_node);
                 if (uprobe->inode != inode)
@@ -903,6 +935,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
                 list_add(&uprobe->pending_list, head);
                 atomic_inc(&uprobe->ref);
         }
+
         spin_unlock_irqrestore(&uprobes_treelock, flags);
 }
 
@@ -912,42 +945,44 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
  *
  * Return -ve no if we fail to insert probes and we cannot
  * bail-out.
- * Return 0 otherwise. i.e :
+ * Return 0 otherwise. i.e:
+ *
  *      - successful insertion of probes
  *      - (or) no possible probes to be inserted.
  *      - (or) insertion of probes failed but we can bail-out.
  */
-int mmap_uprobe(struct vm_area_struct *vma)
+int uprobe_mmap(struct vm_area_struct *vma)
 {
         struct list_head tmp_list;
         struct uprobe *uprobe, *u;
         struct inode *inode;
-        int ret = 0;
+        int ret;
 
         if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
-                return ret;     /* Bail-out */
+                return 0;
 
         inode = vma->vm_file->f_mapping->host;
         if (!inode)
-                return ret;
+                return 0;
 
         INIT_LIST_HEAD(&tmp_list);
         mutex_lock(uprobes_mmap_hash(inode));
         build_probe_list(inode, &tmp_list);
+
+        ret = 0;
+
         list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
                 loff_t vaddr;
 
                 list_del(&uprobe->pending_list);
                 if (!ret) {
                         vaddr = vma_address(vma, uprobe->offset);
-                        if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
-                                put_uprobe(uprobe);
-                                continue;
+                        if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
+                                ret = install_breakpoint(vma->vm_mm, uprobe, vma, vaddr);
+                                /* Ignore double add: */
+                                if (ret == -EEXIST)
+                                        ret = 0;
                         }
-                        ret = install_breakpoint(vma->vm_mm, uprobe, vma,
-                                                                vaddr);
-                        if (ret == -EEXIST)
-                                ret = 0;
                 }
                 put_uprobe(uprobe);
         }