Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/uprobes.c	60
1 file changed, 43 insertions(+), 17 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 24b7d6ca871b..b886a5e7d4ff 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -73,6 +73,17 @@ struct uprobe {
 	struct inode		*inode;		/* Also hold a ref to inode */
 	loff_t			offset;
 	unsigned long		flags;
+
+	/*
+	 * The generic code assumes that it has two members of unknown type
+	 * owned by the arch-specific code:
+	 *
+	 * insn -	copy_insn() saves the original instruction here for
+	 *		arch_uprobe_analyze_insn().
+	 *
+	 * ixol -	potentially modified instruction to execute out of
+	 *		line, copied to xol_area by xol_get_insn_slot().
+	 */
 	struct arch_uprobe	arch;
 };
 
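
The comment added above spells out the contract between the generic and the arch code: insn and ixol are opaque to kernel/events/uprobes.c, which only ever takes their address and size. As an illustration only (this layout is not part of the patch, and each architecture defines its own in asm/uprobes.h), an arch could satisfy the contract with two overlapping byte arrays:

	struct arch_uprobe {
		union {
			u8	insn[MAX_UINSN_BYTES];	/* original insn, filled by copy_insn() */
			u8	ixol[MAX_UINSN_BYTES];	/* out-of-line copy, used by xol_get_insn_slot() */
		};
		/* ... arch-private decode/fixup state ... */
	};
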
@@ -86,6 +97,29 @@ struct return_instance {
 };
 
 /*
+ * Execute out of line area: anonymous executable mapping installed
+ * by the probed task to execute the copy of the original instruction
+ * mangled by set_swbp().
+ *
+ * On a breakpoint hit, thread contests for a slot.  It frees the
+ * slot after singlestep. Currently a fixed number of slots are
+ * allocated.
+ */
+struct xol_area {
+	wait_queue_head_t	wq;		/* if all slots are busy */
+	atomic_t		slot_count;	/* number of in-use slots */
+	unsigned long		*bitmap;	/* 0 = free slot */
+	struct page		*page;
+
+	/*
+	 * We keep the vma's vm_start rather than a pointer to the vma
+	 * itself.  The probed process or a naughty kernel module could make
+	 * the vma go away, and we must handle that reasonably gracefully.
+	 */
+	unsigned long		vaddr;		/* Page(s) of instruction slots */
+};
+
+/*
  * valid_vma: Verify if the specified vma is an executable vma
  * Relax restrictions while unregistering: vm_flags might have
  * changed after breakpoint was inserted.
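
The struct above backs the slot handling that the new comment describes: on a breakpoint hit a thread claims one slot in area->page, single-steps from it, then releases the slot and wakes any waiter on area->wq. A simplified sketch of the claiming loop, reusing the UINSNS_PER_PAGE and UPROBE_XOL_SLOT_BYTES constants from uprobes.c (illustrative, not the exact upstream function):

	static unsigned long xol_take_insn_slot_sketch(struct xol_area *area)
	{
		unsigned int slot_nr;

		do {
			slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
			if (slot_nr < UINSNS_PER_PAGE) {
				if (!test_and_set_bit(slot_nr, area->bitmap))
					break;				/* slot is ours */
				slot_nr = UINSNS_PER_PAGE;		/* lost the race, retry */
			}
			/* every slot is busy: sleep until a sibling frees one */
			wait_event(area->wq, atomic_read(&area->slot_count) < UINSNS_PER_PAGE);
		} while (slot_nr >= UINSNS_PER_PAGE);

		atomic_inc(&area->slot_count);
		return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
	}
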
@@ -330,7 +364,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
 int __weak
 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -529,8 +563,8 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
 {
 	struct address_space *mapping = uprobe->inode->i_mapping;
 	loff_t offs = uprobe->offset;
-	void *insn = uprobe->arch.insn;
-	int size = MAX_UINSN_BYTES;
+	void *insn = &uprobe->arch.insn;
+	int size = sizeof(uprobe->arch.insn);
 	int len, err = -EIO;
 
 	/* Copy only available bytes, -EIO if nothing was read */
@@ -569,7 +603,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
 		goto out;
 
 	ret = -ENOTSUPP;
-	if (is_trap_insn((uprobe_opcode_t *)uprobe->arch.insn))
+	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
 		goto out;
 
 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
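
The three hunks above all make the same change: generic code stops spelling the member as a u8 array (which decays to a pointer and whose size is MAX_UINSN_BYTES) and instead uses &uprobe->arch.insn and sizeof(uprobe->arch.insn), which compile the same way whatever type the architecture picks for insn. A minimal stand-alone illustration of why the &/sizeof form is type-agnostic (hypothetical struct names, nothing from the kernel):

	#include <stdio.h>
	#include <string.h>

	struct arch_a { unsigned char insn[16]; };	/* arch declares an array  */
	struct arch_b { unsigned int  insn;     };	/* arch declares a scalar  */

	int main(void)
	{
		struct arch_a a;
		struct arch_b b;
		const unsigned char bytes[16] = { 0xcc };

		/* &x.insn / sizeof(x.insn) is valid for both layouts ... */
		memcpy(&a.insn, bytes, sizeof(a.insn));
		memcpy(&b.insn, bytes, sizeof(b.insn));

		/* ... whereas passing "b.insn" as the destination would not even
		 * compile, and sizeof() is the only size guaranteed to match. */
		printf("%zu %zu\n", sizeof(a.insn), sizeof(b.insn));
		return 0;
	}
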
@@ -1264,7 +1298,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 
 	/* Initialize the slot */
 	copy_to_page(area->page, xol_vaddr,
-			uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
+			&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
 	/*
 	 * We probably need flush_icache_user_range() but it needs vma.
 	 * This should work on supported architectures too.
@@ -1403,12 +1437,10 @@ static void uprobe_warn(struct task_struct *t, const char *msg)
 
 static void dup_xol_work(struct callback_head *work)
 {
-	kfree(work);
-
 	if (current->flags & PF_EXITING)
 		return;
 
-	if (!__create_xol_area(current->utask->vaddr))
+	if (!__create_xol_area(current->utask->dup_xol_addr))
 		uprobe_warn(current, "dup xol area");
 }
 
@@ -1419,7 +1451,6 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
 {
 	struct uprobe_task *utask = current->utask;
 	struct mm_struct *mm = current->mm;
-	struct callback_head *work;
 	struct xol_area *area;
 
 	t->utask = NULL;
@@ -1441,14 +1472,9 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
 	if (mm == t->mm)
 		return;
 
-	/* TODO: move it into the union in uprobe_task */
-	work = kmalloc(sizeof(*work), GFP_KERNEL);
-	if (!work)
-		return uprobe_warn(t, "dup xol area");
-
-	t->utask->vaddr = area->vaddr;
-	init_task_work(work, dup_xol_work);
-	task_work_add(t, work, true);
+	t->utask->dup_xol_addr = area->vaddr;
+	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
+	task_work_add(t, &t->utask->dup_xol_work, true);
 }
 
 /*
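
The last three hunks replace the kmalloc()ed callback_head with one embedded in uprobe_task (dup_xol_work, next to dup_xol_addr; both presumably added to that struct elsewhere in the same series, since the diffstat here is limited to kernel/events). Embedding removes the allocation-failure path and the kfree() in the callback. The general pattern, sketched with hypothetical names:

	#include <linux/sched.h>
	#include <linux/task_work.h>

	struct my_state {				/* lives as long as the task, like uprobe_task */
		unsigned long		pending_addr;
		struct callback_head	work;
	};

	static void my_callback(struct callback_head *head)
	{
		struct my_state *st = container_of(head, struct my_state, work);

		/* runs in the target task's context before it returns to
		 * user space; nothing was kmalloc()ed, so nothing to free */
		(void)st->pending_addr;
	}

	static void queue_my_work(struct task_struct *t, struct my_state *st,
				  unsigned long addr)
	{
		st->pending_addr = addr;
		init_task_work(&st->work, my_callback);
		task_work_add(t, &st->work, true);	/* notify the task */
	}
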