Diffstat (limited to 'kernel/events/uprobes.c')
-rw-r--r--  kernel/events/uprobes.c  1662
1 file changed, 1662 insertions, 0 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
new file mode 100644
index 000000000000..29e881b0137d
--- /dev/null
+++ b/kernel/events/uprobes.c
@@ -0,0 +1,1662 @@
1/*
2 * User-space Probes (UProbes)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2008-2012
19 * Authors:
20 * Srikar Dronamraju
21 * Jim Keniston
22 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
23 */
24
25#include <linux/kernel.h>
26#include <linux/highmem.h>
27#include <linux/pagemap.h> /* read_mapping_page */
28#include <linux/slab.h>
29#include <linux/sched.h>
30#include <linux/rmap.h> /* anon_vma_prepare */
31#include <linux/mmu_notifier.h> /* set_pte_at_notify */
32#include <linux/swap.h> /* try_to_free_swap */
33#include <linux/ptrace.h> /* user_enable_single_step */
34#include <linux/kdebug.h> /* notifier mechanism */
35
36#include <linux/uprobes.h>
37
38#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
39#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
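/*
 * Worked example (illustrative; UPROBE_XOL_SLOT_BYTES is arch-defined,
 * e.g. 128 on x86): with a 4 KiB PAGE_SIZE,
 *
 *	UINSNS_PER_PAGE = 4096 / 128 = 32
 *
 * i.e. the single XOL page holds at most 32 out-of-line instruction
 * slots per mm; xol_take_insn_slot() below makes further takers wait.
 */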
40
41static struct srcu_struct uprobes_srcu;
42static struct rb_root uprobes_tree = RB_ROOT;
43
44static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
45
46#define UPROBES_HASH_SZ 13
47
48/* serialize (un)register */
49static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
50
51#define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
52
53/* serialize uprobe->pending_list */
54static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
55#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
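/*
 * Example (sketch): both hash macros key on the object's kernel address,
 * so all (un)register operations on the same inode contend on one and
 * the same mutex; e.g. uprobes_hash(inode) picks bucket
 * ((unsigned long)inode) % 13 for UPROBES_HASH_SZ == 13.
 */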
56
57/*
58 * uprobe_events allows us to skip uprobe_mmap() if there are no uprobe
59 * events active at this time. A fine-grained per-inode count would
60 * probably be better.
61 */
62static atomic_t uprobe_events = ATOMIC_INIT(0);
63
64/*
65 * Maintain temporary per-vma info that can be used to check whether a vma
66 * has already been handled. This structure is introduced since extending
67 * vm_area_struct wasn't recommended.
68 */
69struct vma_info {
70 struct list_head probe_list;
71 struct mm_struct *mm;
72 loff_t vaddr;
73};
74
75struct uprobe {
76 struct rb_node rb_node; /* node in the rb tree */
77 atomic_t ref;
78 struct rw_semaphore consumer_rwsem;
79 struct list_head pending_list;
80 struct uprobe_consumer *consumers;
81 struct inode *inode; /* Also hold a ref to inode */
82 loff_t offset;
83 int flags;
84 struct arch_uprobe arch;
85};
86
87/*
88 * valid_vma: Verify whether the specified vma is an executable vma.
89 * Relax the restrictions while unregistering: vm_flags might have
90 * changed after the breakpoint was inserted.
91 * - is_register: indicates whether we are in register context.
92 * - Return true if the specified virtual address lies in an
93 * executable, file-backed vma.
94 */
95static bool valid_vma(struct vm_area_struct *vma, bool is_register)
96{
97 if (!vma->vm_file)
98 return false;
99
100 if (!is_register)
101 return true;
102
103 if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
104 return true;
105
106 return false;
107}
108
109static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
110{
111 loff_t vaddr;
112
113 vaddr = vma->vm_start + offset;
114 vaddr -= vma->vm_pgoff << PAGE_SHIFT;
115
116 return vaddr;
117}
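/*
 * Example (illustrative numbers, 4 KiB pages): for a vma with
 * vm_start == 0x400000 and vm_pgoff == 1 (i.e. the mapping starts at
 * file offset 0x1000), file offset 0x1710 maps to
 *
 *	0x400000 + 0x1710 - (1 << PAGE_SHIFT) == 0x400710
 */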
118
119/**
120 * __replace_page - replace page in vma by new page.
121 * based on replace_page in mm/ksm.c
122 *
123 * @vma: vma that holds the pte pointing to page
124 * @page: the COWed page we are replacing with @kpage
125 * @kpage: the modified page with which we replace @page
126 *
127 * Returns 0 on success, -EFAULT on failure.
128 */
129static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
130{
131 struct mm_struct *mm = vma->vm_mm;
132 pgd_t *pgd;
133 pud_t *pud;
134 pmd_t *pmd;
135 pte_t *ptep;
136 spinlock_t *ptl;
137 unsigned long addr;
138 int err = -EFAULT;
139
140 addr = page_address_in_vma(page, vma);
141 if (addr == -EFAULT)
142 goto out;
143
144 pgd = pgd_offset(mm, addr);
145 if (!pgd_present(*pgd))
146 goto out;
147
148 pud = pud_offset(pgd, addr);
149 if (!pud_present(*pud))
150 goto out;
151
152 pmd = pmd_offset(pud, addr);
153 if (!pmd_present(*pmd))
154 goto out;
155
156 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
157 if (!ptep)
158 goto out;
159
160 get_page(kpage);
161 page_add_new_anon_rmap(kpage, vma, addr);
162
163 flush_cache_page(vma, addr, pte_pfn(*ptep));
164 ptep_clear_flush(vma, addr, ptep);
165 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
166
167 page_remove_rmap(page);
168 if (!page_mapped(page))
169 try_to_free_swap(page);
170 put_page(page);
171 pte_unmap_unlock(ptep, ptl);
172 err = 0;
173
174out:
175 return err;
176}
177
178/**
179 * is_swbp_insn - check if instruction is breakpoint instruction.
180 * @insn: instruction to be checked.
181 * Default implementation of is_swbp_insn
182 * Returns true if @insn is a breakpoint instruction.
183 */
184bool __weak is_swbp_insn(uprobe_opcode_t *insn)
185{
186 return *insn == UPROBE_SWBP_INSN;
187}
188
189/*
190 * NOTE:
191 * Expect the breakpoint instruction to be the smallest-size instruction for
192 * the architecture. If an architecture has variable-length instructions and
193 * the breakpoint instruction is not the smallest instruction supported by
194 * that architecture, then we need to modify read_opcode/write_opcode
195 * accordingly. This is never a problem for architectures that have
196 * fixed-length instructions.
197 */
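/*
 * Example (x86, stated as an illustration): the breakpoint is the
 * one-byte int3 (0xCC), which is also the smallest possible x86
 * instruction length, so read_opcode()/write_opcode() can copy
 * UPROBE_SWBP_INSN_SIZE bytes without special handling.
 */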
198
199/*
200 * write_opcode - write the opcode at a given virtual address.
201 * @auprobe: arch breakpointing information.
202 * @mm: the probed process address space.
203 * @vaddr: the virtual address to store the opcode.
204 * @opcode: opcode to be written at @vaddr.
205 *
206 * Called with mm->mmap_sem held (for read and with a reference to
207 * mm).
208 *
209 * For mm @mm, write the opcode at @vaddr.
210 * Return 0 (success) or a negative errno.
211 */
212static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
213 unsigned long vaddr, uprobe_opcode_t opcode)
214{
215 struct page *old_page, *new_page;
216 struct address_space *mapping;
217 void *vaddr_old, *vaddr_new;
218 struct vm_area_struct *vma;
219 struct uprobe *uprobe;
220 loff_t addr;
221 int ret;
222
223 /* Read the page with vaddr into memory */
224 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
225 if (ret <= 0)
226 return ret;
227
228 ret = -EINVAL;
229
230 /*
231 * We are interested in text pages only. Our pages of interest
232 * should be mapped for read and execute only. We desist from
233 * adding probes in write-mapped pages since the breakpoints
234 * might end up in the file copy.
235 */
236 if (!valid_vma(vma, is_swbp_insn(&opcode)))
237 goto put_out;
238
239 uprobe = container_of(auprobe, struct uprobe, arch);
240 mapping = uprobe->inode->i_mapping;
241 if (mapping != vma->vm_file->f_mapping)
242 goto put_out;
243
244 addr = vma_address(vma, uprobe->offset);
245 if (vaddr != (unsigned long)addr)
246 goto put_out;
247
248 ret = -ENOMEM;
249 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
250 if (!new_page)
251 goto put_out;
252
253 __SetPageUptodate(new_page);
254
255 /*
256 * lock_page() will serialize against do_wp_page()'s
257 * PageAnon() handling
258 */
259 lock_page(old_page);
260 /* copy the page now that we've got it stable */
261 vaddr_old = kmap_atomic(old_page);
262 vaddr_new = kmap_atomic(new_page);
263
264 memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
265
266 /* poke the new insn in, ASSUMES we don't cross page boundary */
267 vaddr &= ~PAGE_MASK;
268 BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
269 memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
270
271 kunmap_atomic(vaddr_new);
272 kunmap_atomic(vaddr_old);
273
274 ret = anon_vma_prepare(vma);
275 if (ret)
276 goto unlock_out;
277
278 lock_page(new_page);
279 ret = __replace_page(vma, old_page, new_page);
280 unlock_page(new_page);
281
282unlock_out:
283 unlock_page(old_page);
284 page_cache_release(new_page);
285
286put_out:
287 put_page(old_page);
288
289 return ret;
290}
291
292/**
293 * read_opcode - read the opcode at a given virtual address.
294 * @mm: the probed process address space.
295 * @vaddr: the virtual address to read the opcode.
296 * @opcode: location to store the read opcode.
297 *
298 * Called with mm->mmap_sem held (for read and with a reference to
299 * mm).
300 *
301 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
302 * Return 0 (success) or a negative errno.
303 */
304static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
305{
306 struct page *page;
307 void *vaddr_new;
308 int ret;
309
310 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
311 if (ret <= 0)
312 return ret;
313
314 lock_page(page);
315 vaddr_new = kmap_atomic(page);
316 vaddr &= ~PAGE_MASK;
317 memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
318 kunmap_atomic(vaddr_new);
319 unlock_page(page);
320
321 put_page(page);
322
323 return 0;
324}
325
326static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
327{
328 uprobe_opcode_t opcode;
329 int result;
330
331 result = read_opcode(mm, vaddr, &opcode);
332 if (result)
333 return result;
334
335 if (is_swbp_insn(&opcode))
336 return 1;
337
338 return 0;
339}
340
341/**
342 * set_swbp - store breakpoint at a given address.
343 * @auprobe: arch specific probepoint information.
344 * @mm: the probed process address space.
345 * @vaddr: the virtual address to insert the opcode.
346 *
347 * For mm @mm, store the breakpoint instruction at @vaddr.
348 * Return 0 (success) or a negative errno.
349 */
350int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
351{
352 int result;
353
354 result = is_swbp_at_addr(mm, vaddr);
355 if (result == 1)
356 return -EEXIST;
357
358 if (result)
359 return result;
360
361 return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
362}
363
364/**
365 * set_orig_insn - Restore the original instruction.
366 * @mm: the probed process address space.
367 * @auprobe: arch specific probepoint information.
368 * @vaddr: the virtual address to insert the opcode.
369 * @verify: if true, verify existence of the breakpoint instruction.
370 *
371 * For mm @mm, restore the original opcode (opcode) at @vaddr.
372 * Return 0 (success) or a negative errno.
373 */
374int __weak
375set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
376{
377 if (verify) {
378 int result;
379
380 result = is_swbp_at_addr(mm, vaddr);
381 if (!result)
382 return -EINVAL;
383
384 if (result != 1)
385 return result;
386 }
387 return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
388}
389
390static int match_uprobe(struct uprobe *l, struct uprobe *r)
391{
392 if (l->inode < r->inode)
393 return -1;
394
395 if (l->inode > r->inode)
396 return 1;
397
398 if (l->offset < r->offset)
399 return -1;
400
401 if (l->offset > r->offset)
402 return 1;
403
404 return 0;
405}
406
407static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
408{
409 struct uprobe u = { .inode = inode, .offset = offset };
410 struct rb_node *n = uprobes_tree.rb_node;
411 struct uprobe *uprobe;
412 int match;
413
414 while (n) {
415 uprobe = rb_entry(n, struct uprobe, rb_node);
416 match = match_uprobe(&u, uprobe);
417 if (!match) {
418 atomic_inc(&uprobe->ref);
419 return uprobe;
420 }
421
422 if (match < 0)
423 n = n->rb_left;
424 else
425 n = n->rb_right;
426 }
427 return NULL;
428}
429
430/*
431 * Find a uprobe corresponding to a given inode:offset
432 * Acquires uprobes_treelock
433 */
434static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
435{
436 struct uprobe *uprobe;
437 unsigned long flags;
438
439 spin_lock_irqsave(&uprobes_treelock, flags);
440 uprobe = __find_uprobe(inode, offset);
441 spin_unlock_irqrestore(&uprobes_treelock, flags);
442
443 return uprobe;
444}
445
446static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
447{
448 struct rb_node **p = &uprobes_tree.rb_node;
449 struct rb_node *parent = NULL;
450 struct uprobe *u;
451 int match;
452
453 while (*p) {
454 parent = *p;
455 u = rb_entry(parent, struct uprobe, rb_node);
456 match = match_uprobe(uprobe, u);
457 if (!match) {
458 atomic_inc(&u->ref);
459 return u;
460 }
461
462 if (match < 0)
463 p = &parent->rb_left;
464 else
465 p = &parent->rb_right;
466
467 }
468
469 u = NULL;
470 rb_link_node(&uprobe->rb_node, parent, p);
471 rb_insert_color(&uprobe->rb_node, &uprobes_tree);
472 /* get access + creation ref */
473 atomic_set(&uprobe->ref, 2);
474
475 return u;
476}
477
478/*
479 * Acquire uprobes_treelock.
480 * If a matching uprobe already exists in the rbtree,
481 * increment its access refcount and return the matching uprobe.
482 *
483 * If there is no matching uprobe, insert it into the rbtree,
484 * take a double refcount (access + creation) and return NULL.
485 */
486static struct uprobe *insert_uprobe(struct uprobe *uprobe)
487{
488 unsigned long flags;
489 struct uprobe *u;
490
491 spin_lock_irqsave(&uprobes_treelock, flags);
492 u = __insert_uprobe(uprobe);
493 spin_unlock_irqrestore(&uprobes_treelock, flags);
494
495 /* For now assume that the instruction need not be single-stepped */
496 uprobe->flags |= UPROBE_SKIP_SSTEP;
497
498 return u;
499}
500
501static void put_uprobe(struct uprobe *uprobe)
502{
503 if (atomic_dec_and_test(&uprobe->ref))
504 kfree(uprobe);
505}
506
507static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
508{
509 struct uprobe *uprobe, *cur_uprobe;
510
511 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
512 if (!uprobe)
513 return NULL;
514
515 uprobe->inode = igrab(inode);
516 uprobe->offset = offset;
517 init_rwsem(&uprobe->consumer_rwsem);
518 INIT_LIST_HEAD(&uprobe->pending_list);
519
520 /* add to uprobes_tree, sorted on inode:offset */
521 cur_uprobe = insert_uprobe(uprobe);
522
523 /* a uprobe exists for this inode:offset combination */
524 if (cur_uprobe) {
525 kfree(uprobe);
526 uprobe = cur_uprobe;
527 iput(inode);
528 } else {
529 atomic_inc(&uprobe_events);
530 }
531
532 return uprobe;
533}
534
535static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
536{
537 struct uprobe_consumer *uc;
538
539 if (!(uprobe->flags & UPROBE_RUN_HANDLER))
540 return;
541
542 down_read(&uprobe->consumer_rwsem);
543 for (uc = uprobe->consumers; uc; uc = uc->next) {
544 if (!uc->filter || uc->filter(uc, current))
545 uc->handler(uc, regs);
546 }
547 up_read(&uprobe->consumer_rwsem);
548}
549
550/* Returns the previous head of the consumer list */
551static struct uprobe_consumer *
552consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
553{
554 down_write(&uprobe->consumer_rwsem);
555 uc->next = uprobe->consumers;
556 uprobe->consumers = uc;
557 up_write(&uprobe->consumer_rwsem);
558
559 return uc->next;
560}
561
562/*
563 * For uprobe @uprobe, delete the consumer @uc.
564 * Return true if @uc was deleted successfully,
565 * false otherwise.
566 */
567static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
568{
569 struct uprobe_consumer **con;
570 bool ret = false;
571
572 down_write(&uprobe->consumer_rwsem);
573 for (con = &uprobe->consumers; *con; con = &(*con)->next) {
574 if (*con == uc) {
575 *con = uc->next;
576 ret = true;
577 break;
578 }
579 }
580 up_write(&uprobe->consumer_rwsem);
581
582 return ret;
583}
584
585static int
586__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
587 unsigned long nbytes, unsigned long offset)
588{
589 struct file *filp = vma->vm_file;
590 struct page *page;
591 void *vaddr;
592 unsigned long off1;
593 unsigned long idx;
594
595 if (!filp)
596 return -EINVAL;
597
598 idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
599 off1 = offset &= ~PAGE_MASK;
600
601 /*
602 * Ensure that the page that has the original instruction is
603 * populated and in page-cache.
604 */
605 page = read_mapping_page(mapping, idx, filp);
606 if (IS_ERR(page))
607 return PTR_ERR(page);
608
609 vaddr = kmap_atomic(page);
610 memcpy(insn, vaddr + off1, nbytes);
611 kunmap_atomic(vaddr);
612 page_cache_release(page);
613
614 return 0;
615}
616
617static int
618copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
619{
620 struct address_space *mapping;
621 unsigned long nbytes;
622 int bytes;
623
624 addr &= ~PAGE_MASK;
625 nbytes = PAGE_SIZE - addr;
626 mapping = uprobe->inode->i_mapping;
627
628 /* Instruction at end of binary; copy only available bytes */
629 if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
630 bytes = uprobe->inode->i_size - uprobe->offset;
631 else
632 bytes = MAX_UINSN_BYTES;
633
634 /* Instruction at the page-boundary; copy bytes in second page */
635 if (nbytes < bytes) {
636 if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
637 bytes - nbytes, uprobe->offset + nbytes))
638 return -ENOMEM;
639
640 bytes = nbytes;
641 }
642 return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
643}
644
645/*
646 * How mm->uprobes_state.count gets updated
647 * uprobe_mmap() increments the count if
648 * - it successfully adds a breakpoint.
649 * - it cannot add a breakpoint, but sees that there is an underlying
650 * breakpoint (via is_swbp_at_addr()).
651 *
652 * uprobe_munmap() decrements the count if
653 * - it sees an underlying breakpoint (via is_swbp_at_addr()).
654 * (A subsequent uprobe_unregister wouldn't find the breakpoint
655 * unless a uprobe_mmap kicks in, since the old vma would be
656 * dropped just after uprobe_munmap.)
657 *
658 * uprobe_register() increments the count if:
659 * - it successfully adds a breakpoint.
660 *
661 * uprobe_unregister() decrements the count if:
662 * - it sees an underlying breakpoint and removes it successfully
663 * (via is_swbp_at_addr()).
664 * (A subsequent uprobe_munmap wouldn't find the breakpoint
665 * since there is no underlying breakpoint after the
666 * breakpoint removal.)
667 */
668static int
669install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
670 struct vm_area_struct *vma, loff_t vaddr)
671{
672 unsigned long addr;
673 int ret;
674
675 /*
676 * If the probe is being deleted, the unregistering thread could
677 * already be done with its vma-rmap walk. Adding a probe now can be
678 * fatal since nobody will be able to clean it up. Also, we could be
679 * called from the fork or mremap path, where the probe might already
680 * have been inserted. Hence behave as if the probe already existed.
681 */
682 if (!uprobe->consumers)
683 return -EEXIST;
684
685 addr = (unsigned long)vaddr;
686
687 if (!(uprobe->flags & UPROBE_COPY_INSN)) {
688 ret = copy_insn(uprobe, vma, addr);
689 if (ret)
690 return ret;
691
692 if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
693 return -EEXIST;
694
695 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
696 if (ret)
697 return ret;
698
699 uprobe->flags |= UPROBE_COPY_INSN;
700 }
701
702 /*
703 * Ideally, we should update the probe count after the breakpoint
704 * has been successfully inserted. However, a thread could hit the
705 * breakpoint we just inserted even before the probe count is
706 * incremented. If this is the first breakpoint placed, the breakpoint
707 * notifier might ignore uprobes and pass the trap to the thread.
708 * Hence increment before, and decrement on failure.
709 */
710 atomic_inc(&mm->uprobes_state.count);
711 ret = set_swbp(&uprobe->arch, mm, addr);
712 if (ret)
713 atomic_dec(&mm->uprobes_state.count);
714
715 return ret;
716}
717
718static void
719remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
720{
721 if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
722 atomic_dec(&mm->uprobes_state.count);
723}
724
725/*
726 * There could be threads that have hit the breakpoint and are entering the
727 * notifier code, trying to acquire the uprobes_treelock. The thread
728 * calling delete_uprobe() that is removing the uprobe from the rb_tree can
729 * race with these threads and might acquire the uprobes_treelock ahead
730 * of some of the breakpoint-hit threads. In such a case, those threads
731 * will not find the uprobe. The unregistering thread therefore waits
732 * (via synchronize_srcu()) until all threads that hit the breakpoint have
733 * left the SRCU read section before removing the uprobe from the rbtree.
734 */
735static void delete_uprobe(struct uprobe *uprobe)
736{
737 unsigned long flags;
738
739 synchronize_srcu(&uprobes_srcu);
740 spin_lock_irqsave(&uprobes_treelock, flags);
741 rb_erase(&uprobe->rb_node, &uprobes_tree);
742 spin_unlock_irqrestore(&uprobes_treelock, flags);
743 iput(uprobe->inode);
744 put_uprobe(uprobe);
745 atomic_dec(&uprobe_events);
746}
747
748static struct vma_info *
749__find_next_vma_info(struct address_space *mapping, struct list_head *head,
750 struct vma_info *vi, loff_t offset, bool is_register)
751{
752 struct prio_tree_iter iter;
753 struct vm_area_struct *vma;
754 struct vma_info *tmpvi;
755 unsigned long pgoff;
756 int existing_vma;
757 loff_t vaddr;
758
759 pgoff = offset >> PAGE_SHIFT;
760
761 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
762 if (!valid_vma(vma, is_register))
763 continue;
764
765 existing_vma = 0;
766 vaddr = vma_address(vma, offset);
767
768 list_for_each_entry(tmpvi, head, probe_list) {
769 if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
770 existing_vma = 1;
771 break;
772 }
773 }
774
775 /*
776 * Another vma needs a probe to be installed. However, skip
777 * installing the probe if the vma is about to be unlinked.
778 */
779 if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
780 vi->mm = vma->vm_mm;
781 vi->vaddr = vaddr;
782 list_add(&vi->probe_list, head);
783
784 return vi;
785 }
786 }
787
788 return NULL;
789}
790
791/*
792 * Iterate in the rmap prio tree and find a vma where a probe has not
793 * yet been inserted.
794 */
795static struct vma_info *
796find_next_vma_info(struct address_space *mapping, struct list_head *head,
797 loff_t offset, bool is_register)
798{
799 struct vma_info *vi, *retvi;
800
801 vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
802 if (!vi)
803 return ERR_PTR(-ENOMEM);
804
805 mutex_lock(&mapping->i_mmap_mutex);
806 retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
807 mutex_unlock(&mapping->i_mmap_mutex);
808
809 if (!retvi)
810 kfree(vi);
811
812 return retvi;
813}
814
815static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
816{
817 struct list_head try_list;
818 struct vm_area_struct *vma;
819 struct address_space *mapping;
820 struct vma_info *vi, *tmpvi;
821 struct mm_struct *mm;
822 loff_t vaddr;
823 int ret;
824
825 mapping = uprobe->inode->i_mapping;
826 INIT_LIST_HEAD(&try_list);
827
828 ret = 0;
829
830 for (;;) {
831 vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
832 if (!vi)
833 break;
834
835 if (IS_ERR(vi)) {
836 ret = PTR_ERR(vi);
837 break;
838 }
839
840 mm = vi->mm;
841 down_read(&mm->mmap_sem);
842 vma = find_vma(mm, (unsigned long)vi->vaddr);
843 if (!vma || !valid_vma(vma, is_register)) {
844 list_del(&vi->probe_list);
845 kfree(vi);
846 up_read(&mm->mmap_sem);
847 mmput(mm);
848 continue;
849 }
850 vaddr = vma_address(vma, uprobe->offset);
851 if (vma->vm_file->f_mapping->host != uprobe->inode ||
852 vaddr != vi->vaddr) {
853 list_del(&vi->probe_list);
854 kfree(vi);
855 up_read(&mm->mmap_sem);
856 mmput(mm);
857 continue;
858 }
859
860 if (is_register)
861 ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
862 else
863 remove_breakpoint(uprobe, mm, vi->vaddr);
864
865 up_read(&mm->mmap_sem);
866 mmput(mm);
867 if (is_register) {
868 if (ret && ret == -EEXIST)
869 ret = 0;
870 if (ret)
871 break;
872 }
873 }
874
875 list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
876 list_del(&vi->probe_list);
877 kfree(vi);
878 }
879
880 return ret;
881}
882
883static int __uprobe_register(struct uprobe *uprobe)
884{
885 return register_for_each_vma(uprobe, true);
886}
887
888static void __uprobe_unregister(struct uprobe *uprobe)
889{
890 if (!register_for_each_vma(uprobe, false))
891 delete_uprobe(uprobe);
892
893 /* TODO: can't unregister? schedule a worker thread */
894}
895
896/*
897 * uprobe_register - register a probe
898 * @inode: the file in which the probe has to be placed.
899 * @offset: offset from the start of the file.
900 * @uc: information on how to handle the probe.
901 *
902 * Apart from the access refcount, uprobe_register() takes a creation
903 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
904 * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
905 * tuple). The creation refcount stops uprobe_unregister from freeing the
906 * @uprobe even before the register operation is complete. The creation
907 * refcount is released when the last @uc for the @uprobe
908 * unregisters.
909 *
910 * Return errno if it cannot successfully install probes,
911 * else return 0 (success).
912 */
913int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
914{
915 struct uprobe *uprobe;
916 int ret;
917
918 if (!inode || !uc || uc->next)
919 return -EINVAL;
920
921 if (offset > i_size_read(inode))
922 return -EINVAL;
923
924 ret = 0;
925 mutex_lock(uprobes_hash(inode));
926 uprobe = alloc_uprobe(inode, offset);
927
928 if (uprobe && !consumer_add(uprobe, uc)) {
929 ret = __uprobe_register(uprobe);
930 if (ret) {
931 uprobe->consumers = NULL;
932 __uprobe_unregister(uprobe);
933 } else {
934 uprobe->flags |= UPROBE_RUN_HANDLER;
935 }
936 }
937
938 mutex_unlock(uprobes_hash(inode));
939 put_uprobe(uprobe);
940
941 return ret;
942}
943
944/*
945 * uprobe_unregister - unregister an already registered probe.
946 * @inode: the file from which the probe has to be removed.
947 * @offset: offset from the start of the file.
948 * @uc: identify which probe if multiple probes are colocated.
949 */
950void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
951{
952 struct uprobe *uprobe;
953
954 if (!inode || !uc)
955 return;
956
957 uprobe = find_uprobe(inode, offset);
958 if (!uprobe)
959 return;
960
961 mutex_lock(uprobes_hash(inode));
962
963 if (consumer_del(uprobe, uc)) {
964 if (!uprobe->consumers) {
965 __uprobe_unregister(uprobe);
966 uprobe->flags &= ~UPROBE_RUN_HANDLER;
967 }
968 }
969
970 mutex_unlock(uprobes_hash(inode));
971 if (uprobe)
972 put_uprobe(uprobe);
973}
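/*
 * Usage sketch (illustrative, not part of this file): a kernel client
 * registers a consumer for an inode:offset pair and later removes it.
 * Exact prototypes for the handler/filter callbacks live in
 * <linux/uprobes.h>; this mirrors how handler_chain() invokes them.
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	ret = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */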
974
975/*
976 * Of all the nodes that correspond to the given inode, return the node
977 * with the least offset.
978 */
979static struct rb_node *find_least_offset_node(struct inode *inode)
980{
981 struct uprobe u = { .inode = inode, .offset = 0};
982 struct rb_node *n = uprobes_tree.rb_node;
983 struct rb_node *close_node = NULL;
984 struct uprobe *uprobe;
985 int match;
986
987 while (n) {
988 uprobe = rb_entry(n, struct uprobe, rb_node);
989 match = match_uprobe(&u, uprobe);
990
991 if (uprobe->inode == inode)
992 close_node = n;
993
994 if (!match)
995 return close_node;
996
997 if (match < 0)
998 n = n->rb_left;
999 else
1000 n = n->rb_right;
1001 }
1002
1003 return close_node;
1004}
1005
1006/*
1007 * For a given inode, build a list of probes that need to be inserted.
1008 */
1009static void build_probe_list(struct inode *inode, struct list_head *head)
1010{
1011 struct uprobe *uprobe;
1012 unsigned long flags;
1013 struct rb_node *n;
1014
1015 spin_lock_irqsave(&uprobes_treelock, flags);
1016
1017 n = find_least_offset_node(inode);
1018
1019 for (; n; n = rb_next(n)) {
1020 uprobe = rb_entry(n, struct uprobe, rb_node);
1021 if (uprobe->inode != inode)
1022 break;
1023
1024 list_add(&uprobe->pending_list, head);
1025 atomic_inc(&uprobe->ref);
1026 }
1027
1028 spin_unlock_irqrestore(&uprobes_treelock, flags);
1029}
1030
1031/*
1032 * Called from mmap_region,
1033 * with mm->mmap_sem acquired.
1034 *
1035 * Return a negative errno if we fail to insert probes and cannot
1036 * bail out.
1037 * Return 0 otherwise, i.e.:
1038 *
1039 * - successful insertion of probes
1040 * - (or) no probes to be inserted
1041 * - (or) insertion of probes failed but we can bail out
1042 */
1043int uprobe_mmap(struct vm_area_struct *vma)
1044{
1045 struct list_head tmp_list;
1046 struct uprobe *uprobe, *u;
1047 struct inode *inode;
1048 int ret, count;
1049
1050 if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
1051 return 0;
1052
1053 inode = vma->vm_file->f_mapping->host;
1054 if (!inode)
1055 return 0;
1056
1057 INIT_LIST_HEAD(&tmp_list);
1058 mutex_lock(uprobes_mmap_hash(inode));
1059 build_probe_list(inode, &tmp_list);
1060
1061 ret = 0;
1062 count = 0;
1063
1064 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1065 loff_t vaddr;
1066
1067 list_del(&uprobe->pending_list);
1068 if (!ret) {
1069 vaddr = vma_address(vma, uprobe->offset);
1070
1071 if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
1072 put_uprobe(uprobe);
1073 continue;
1074 }
1075
1076 ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1077
1078 /* Ignore double add: */
1079 if (ret == -EEXIST) {
1080 ret = 0;
1081
1082 if (!is_swbp_at_addr(vma->vm_mm, vaddr))
1083 continue;
1084
1085 /*
1086 * Unable to insert a breakpoint, but
1087 * breakpoint lies underneath. Increment the
1088 * probe count.
1089 */
1090 atomic_inc(&vma->vm_mm->uprobes_state.count);
1091 }
1092
1093 if (!ret)
1094 count++;
1095 }
1096 put_uprobe(uprobe);
1097 }
1098
1099 mutex_unlock(uprobes_mmap_hash(inode));
1100
1101 if (ret)
1102 atomic_sub(count, &vma->vm_mm->uprobes_state.count);
1103
1104 return ret;
1105}
1106
1107/*
1108 * Called in context of a munmap of a vma.
1109 */
1110void uprobe_munmap(struct vm_area_struct *vma)
1111{
1112 struct list_head tmp_list;
1113 struct uprobe *uprobe, *u;
1114 struct inode *inode;
1115
1116 if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
1117 return;
1118
1119 if (!atomic_read(&vma->vm_mm->uprobes_state.count))
1120 return;
1121
1122 inode = vma->vm_file->f_mapping->host;
1123 if (!inode)
1124 return;
1125
1126 INIT_LIST_HEAD(&tmp_list);
1127 mutex_lock(uprobes_mmap_hash(inode));
1128 build_probe_list(inode, &tmp_list);
1129
1130 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1131 loff_t vaddr;
1132
1133 list_del(&uprobe->pending_list);
1134 vaddr = vma_address(vma, uprobe->offset);
1135
1136 if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
1137 /*
1138 * An unregister could have removed the probe before
1139 * unmap. So check before we decrement the count.
1140 */
1141 if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
1142 atomic_dec(&vma->vm_mm->uprobes_state.count);
1143 }
1144 put_uprobe(uprobe);
1145 }
1146 mutex_unlock(uprobes_mmap_hash(inode));
1147}
1148
1149/* Slot allocation for XOL */
1150static int xol_add_vma(struct xol_area *area)
1151{
1152 struct mm_struct *mm;
1153 int ret;
1154
1155 area->page = alloc_page(GFP_HIGHUSER);
1156 if (!area->page)
1157 return -ENOMEM;
1158
1159 ret = -EALREADY;
1160 mm = current->mm;
1161
1162 down_write(&mm->mmap_sem);
1163 if (mm->uprobes_state.xol_area)
1164 goto fail;
1165
1166 ret = -ENOMEM;
1167
1168 /* Try to map as high as possible, this is only a hint. */
1169 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
1170 if (area->vaddr & ~PAGE_MASK) {
1171 ret = area->vaddr;
1172 goto fail;
1173 }
1174
1175 ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1176 VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
1177 if (ret)
1178 goto fail;
1179
1180 smp_wmb(); /* pairs with get_xol_area() */
1181 mm->uprobes_state.xol_area = area;
1182 ret = 0;
1183
1184fail:
1185 up_write(&mm->mmap_sem);
1186 if (ret)
1187 __free_page(area->page);
1188
1189 return ret;
1190}
1191
1192static struct xol_area *get_xol_area(struct mm_struct *mm)
1193{
1194 struct xol_area *area;
1195
1196 area = mm->uprobes_state.xol_area;
1197 smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
1198
1199 return area;
1200}
1201
1202/*
1203 * xol_alloc_area - Allocate process's xol_area.
1204 * This area will be used for storing instructions for execution out of
1205 * line.
1206 *
1207 * Returns the allocated area or NULL.
1208 */
1209static struct xol_area *xol_alloc_area(void)
1210{
1211 struct xol_area *area;
1212
1213 area = kzalloc(sizeof(*area), GFP_KERNEL);
1214 if (unlikely(!area))
1215 return NULL;
1216
1217 area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1218
1219 if (!area->bitmap)
1220 goto fail;
1221
1222 init_waitqueue_head(&area->wq);
1223 if (!xol_add_vma(area))
1224 return area;
1225
1226fail:
1227 kfree(area->bitmap);
1228 kfree(area);
1229
1230 return get_xol_area(current->mm);
1231}
1232
1233/*
1234 * uprobe_clear_state - Free the area allocated for slots.
1235 */
1236void uprobe_clear_state(struct mm_struct *mm)
1237{
1238 struct xol_area *area = mm->uprobes_state.xol_area;
1239
1240 if (!area)
1241 return;
1242
1243 put_page(area->page);
1244 kfree(area->bitmap);
1245 kfree(area);
1246}
1247
1248/*
1249 * uprobe_reset_state - Reset the per-mm uprobes state; the XOL area is not inherited.
1250 */
1251void uprobe_reset_state(struct mm_struct *mm)
1252{
1253 mm->uprobes_state.xol_area = NULL;
1254 atomic_set(&mm->uprobes_state.count, 0);
1255}
1256
1257/*
1258 * - search for a free slot; wait if none is available.
1259 */
1260static unsigned long xol_take_insn_slot(struct xol_area *area)
1261{
1262 unsigned long slot_addr;
1263 int slot_nr;
1264
1265 do {
1266 slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1267 if (slot_nr < UINSNS_PER_PAGE) {
1268 if (!test_and_set_bit(slot_nr, area->bitmap))
1269 break;
1270
1271 slot_nr = UINSNS_PER_PAGE;
1272 continue;
1273 }
1274 wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1275 } while (slot_nr >= UINSNS_PER_PAGE);
1276
1277 slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1278 atomic_inc(&area->slot_count);
1279
1280 return slot_addr;
1281}
1282
1283/*
1284 * xol_get_insn_slot - If no slot was allocated, allocate one
1285 * and copy the probed instruction into it.
1286 * Returns the allocated slot address or 0.
1287 */
1288static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
1289{
1290 struct xol_area *area;
1291 unsigned long offset;
1292 void *vaddr;
1293
1294 area = get_xol_area(current->mm);
1295 if (!area) {
1296 area = xol_alloc_area();
1297 if (!area)
1298 return 0;
1299 }
1300 current->utask->xol_vaddr = xol_take_insn_slot(area);
1301
1302 /*
1303 * Initialize the slot only if xol_vaddr points to a valid
1304 * instruction slot.
1305 */
1306 if (unlikely(!current->utask->xol_vaddr))
1307 return 0;
1308
1309 current->utask->vaddr = slot_addr;
1310 offset = current->utask->xol_vaddr & ~PAGE_MASK;
1311 vaddr = kmap_atomic(area->page);
1312 memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
1313 kunmap_atomic(vaddr);
1314
1315 return current->utask->xol_vaddr;
1316}
1317
1318/*
1319 * xol_free_insn_slot - If slot was earlier allocated by
1320 * @xol_get_insn_slot(), make the slot available for
1321 * subsequent requests.
1322 */
1323static void xol_free_insn_slot(struct task_struct *tsk)
1324{
1325 struct xol_area *area;
1326 unsigned long vma_end;
1327 unsigned long slot_addr;
1328
1329 if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1330 return;
1331
1332 slot_addr = tsk->utask->xol_vaddr;
1333
1334 if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
1335 return;
1336
1337 area = tsk->mm->uprobes_state.xol_area;
1338 vma_end = area->vaddr + PAGE_SIZE;
1339 if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1340 unsigned long offset;
1341 int slot_nr;
1342
1343 offset = slot_addr - area->vaddr;
1344 slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1345 if (slot_nr >= UINSNS_PER_PAGE)
1346 return;
1347
1348 clear_bit(slot_nr, area->bitmap);
1349 atomic_dec(&area->slot_count);
1350 if (waitqueue_active(&area->wq))
1351 wake_up(&area->wq);
1352
1353 tsk->utask->xol_vaddr = 0;
1354 }
1355}
1356
1357/**
1358 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1359 * @regs: Reflects the saved state of the task after it has hit a breakpoint
1360 * instruction.
1361 * Return the address of the breakpoint instruction.
1362 */
1363unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1364{
1365 return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1366}
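/*
 * Example (assuming an arch whose breakpoint instruction is one byte,
 * e.g. int3 on x86 with UPROBE_SWBP_INSN_SIZE == 1): if the trap leaves
 * regs->ip == 0x400711, the breakpoint itself lives at 0x400710.
 */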
1367
1368/*
1369 * Called with no locks held.
1370 * Called in the context of an exiting or an exec-ing thread.
1371 */
1372void uprobe_free_utask(struct task_struct *t)
1373{
1374 struct uprobe_task *utask = t->utask;
1375
1376 if (t->uprobe_srcu_id != -1)
1377 srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
1378
1379 if (!utask)
1380 return;
1381
1382 if (utask->active_uprobe)
1383 put_uprobe(utask->active_uprobe);
1384
1385 xol_free_insn_slot(t);
1386 kfree(utask);
1387 t->utask = NULL;
1388}
1389
1390/*
1391 * Called in context of a new clone/fork from copy_process.
1392 */
1393void uprobe_copy_process(struct task_struct *t)
1394{
1395 t->utask = NULL;
1396 t->uprobe_srcu_id = -1;
1397}
1398
1399/*
1400 * Allocate a uprobe_task object for the task.
1401 * Called when the thread hits a breakpoint for the first time.
1402 *
1403 * Returns:
1404 * - pointer to new uprobe_task on success
1405 * - NULL otherwise
1406 */
1407static struct uprobe_task *add_utask(void)
1408{
1409 struct uprobe_task *utask;
1410
1411 utask = kzalloc(sizeof *utask, GFP_KERNEL);
1412 if (unlikely(!utask))
1413 return NULL;
1414
1415 utask->active_uprobe = NULL;
1416 current->utask = utask;
1417 return utask;
1418}
1419
1420/* Prepare to single-step probed instruction out of line. */
1421static int
1422pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
1423{
1424 if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
1425 return 0;
1426
1427 return -EFAULT;
1428}
1429
1430/*
1431 * If we are singlestepping, then ensure this thread is not connected to
1432 * non-fatal signals until completion of singlestep. When xol insn itself
1433 * triggers the signal, restart the original insn even if the task is
1434 * already SIGKILL'ed (since coredump should report the correct ip). This
1435 * is even more important if the task has a handler for SIGSEGV/etc.: the
1436 * _same_ instruction should be repeated after return from the signal
1437 * handler, and SSTEP can never finish in this case.
1438 */
1439bool uprobe_deny_signal(void)
1440{
1441 struct task_struct *t = current;
1442 struct uprobe_task *utask = t->utask;
1443
1444 if (likely(!utask || !utask->active_uprobe))
1445 return false;
1446
1447 WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1448
1449 if (signal_pending(t)) {
1450 spin_lock_irq(&t->sighand->siglock);
1451 clear_tsk_thread_flag(t, TIF_SIGPENDING);
1452 spin_unlock_irq(&t->sighand->siglock);
1453
1454 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1455 utask->state = UTASK_SSTEP_TRAPPED;
1456 set_tsk_thread_flag(t, TIF_UPROBE);
1457 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1458 }
1459 }
1460
1461 return true;
1462}
1463
1464/*
1465 * Avoid singlestepping the original instruction if the original instruction
1466 * is a NOP or can be emulated.
1467 */
1468static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
1469{
1470 if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1471 return true;
1472
1473 uprobe->flags &= ~UPROBE_SKIP_SSTEP;
1474 return false;
1475}
1476
1477/*
1478 * Run handler and ask thread to singlestep.
1479 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1480 */
1481static void handle_swbp(struct pt_regs *regs)
1482{
1483 struct vm_area_struct *vma;
1484 struct uprobe_task *utask;
1485 struct uprobe *uprobe;
1486 struct mm_struct *mm;
1487 unsigned long bp_vaddr;
1488
1489 uprobe = NULL;
1490 bp_vaddr = uprobe_get_swbp_addr(regs);
1491 mm = current->mm;
1492 down_read(&mm->mmap_sem);
1493 vma = find_vma(mm, bp_vaddr);
1494
1495 if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
1496 struct inode *inode;
1497 loff_t offset;
1498
1499 inode = vma->vm_file->f_mapping->host;
1500 offset = bp_vaddr - vma->vm_start;
1501 offset += (vma->vm_pgoff << PAGE_SHIFT);
1502 uprobe = find_uprobe(inode, offset);
1503 }
1504
1505 srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
1506 current->uprobe_srcu_id = -1;
1507 up_read(&mm->mmap_sem);
1508
1509 if (!uprobe) {
1510 /* No matching uprobe; signal SIGTRAP. */
1511 send_sig(SIGTRAP, current, 0);
1512 return;
1513 }
1514
1515 utask = current->utask;
1516 if (!utask) {
1517 utask = add_utask();
1518 /* Cannot allocate; re-execute the instruction. */
1519 if (!utask)
1520 goto cleanup_ret;
1521 }
1522 utask->active_uprobe = uprobe;
1523 handler_chain(uprobe, regs);
1524 if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
1525 goto cleanup_ret;
1526
1527 utask->state = UTASK_SSTEP;
1528 if (!pre_ssout(uprobe, regs, bp_vaddr)) {
1529 user_enable_single_step(current);
1530 return;
1531 }
1532
1533cleanup_ret:
1534 if (utask) {
1535 utask->active_uprobe = NULL;
1536 utask->state = UTASK_RUNNING;
1537 }
1538 if (uprobe) {
1539 if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
1540
1541 /*
1542 * cannot singlestep; cannot skip instruction;
1543 * re-execute the instruction.
1544 */
1545 instruction_pointer_set(regs, bp_vaddr);
1546
1547 put_uprobe(uprobe);
1548 }
1549}
1550
1551/*
1552 * Perform required fix-ups and disable singlestep.
1553 * Allow pending signals to take effect.
1554 */
1555static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1556{
1557 struct uprobe *uprobe;
1558
1559 uprobe = utask->active_uprobe;
1560 if (utask->state == UTASK_SSTEP_ACK)
1561 arch_uprobe_post_xol(&uprobe->arch, regs);
1562 else if (utask->state == UTASK_SSTEP_TRAPPED)
1563 arch_uprobe_abort_xol(&uprobe->arch, regs);
1564 else
1565 WARN_ON_ONCE(1);
1566
1567 put_uprobe(uprobe);
1568 utask->active_uprobe = NULL;
1569 utask->state = UTASK_RUNNING;
1570 user_disable_single_step(current);
1571 xol_free_insn_slot(current);
1572
1573 spin_lock_irq(&current->sighand->siglock);
1574 recalc_sigpending(); /* see uprobe_deny_signal() */
1575 spin_unlock_irq(&current->sighand->siglock);
1576}
1577
1578/*
1579 * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag
1580 * (and, on subsequent probe hits, sets the thread's state to UTASK_BP_HIT)
1581 * and allows the thread to return from the interrupt.
1582 *
1583 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
1584 * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
1585 * interrupt.
1586 *
1587 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1588 * uprobe_notify_resume().
1589 */
1590void uprobe_notify_resume(struct pt_regs *regs)
1591{
1592 struct uprobe_task *utask;
1593
1594 utask = current->utask;
1595 if (!utask || utask->state == UTASK_BP_HIT)
1596 handle_swbp(regs);
1597 else
1598 handle_singlestep(utask, regs);
1599}
1600
1601/*
1602 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1603 * the notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
1604 */
1605int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1606{
1607 struct uprobe_task *utask;
1608
1609 if (!current->mm || !atomic_read(&current->mm->uprobes_state.count))
1610 /* task is currently not uprobed */
1611 return 0;
1612
1613 utask = current->utask;
1614 if (utask)
1615 utask->state = UTASK_BP_HIT;
1616
1617 set_thread_flag(TIF_UPROBE);
1618 current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
1619
1620 return 1;
1621}
1622
1623/*
1624 * uprobe_post_sstep_notifier gets called in interrupt context as part of the
1625 * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of singlestep.
1626 */
1627int uprobe_post_sstep_notifier(struct pt_regs *regs)
1628{
1629 struct uprobe_task *utask = current->utask;
1630
1631 if (!current->mm || !utask || !utask->active_uprobe)
1632 /* task is currently not uprobed */
1633 return 0;
1634
1635 utask->state = UTASK_SSTEP_ACK;
1636 set_thread_flag(TIF_UPROBE);
1637 return 1;
1638}
1639
1640static struct notifier_block uprobe_exception_nb = {
1641 .notifier_call = arch_uprobe_exception_notify,
1642 .priority = INT_MAX-1, /* notified after kprobes, kgdb */
1643};
1644
1645static int __init init_uprobes(void)
1646{
1647 int i;
1648
1649 for (i = 0; i < UPROBES_HASH_SZ; i++) {
1650 mutex_init(&uprobes_mutex[i]);
1651 mutex_init(&uprobes_mmap_mutex[i]);
1652 }
1653 init_srcu_struct(&uprobes_srcu);
1654
1655 return register_die_notifier(&uprobe_exception_nb);
1656}
1657module_init(init_uprobes);
1658
1659static void __exit exit_uprobes(void)
1660{
1661}
1662module_exit(exit_uprobes);