diff options
author | Anton Arapov <anton@redhat.com> | 2013-04-03 12:00:35 -0400 |
---|---|---|
committer | Oleg Nesterov <oleg@redhat.com> | 2013-04-13 09:31:57 -0400 |
commit | 0dfd0eb8e4d72ded8b21f4fee74ba5547408cbe9 (patch) | |
tree | af23b10139df4ad0fc424ab7cffa859d216b9ac3 /kernel | |
parent | f15706b79d6f71e016cd06afa21ee31500029067 (diff) |
uretprobes: Return probe entry, prepare_uretprobe()
When a uprobe with a return-probe consumer is hit, the prepare_uretprobe()
function is invoked. It creates a return_instance, hijacks the return
address and replaces it with the trampoline address.
* Return instances are kept as a stack, per uprobed task.
* A return instance is chained when the original return address is the
trampoline page's vaddr (e.g. on a recursive call of the probed function).
Signed-off-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/events/uprobes.c | 92 |
1 file changed, 91 insertions, 1 deletion
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d345b7c6cb2d..3798947b3b58 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -75,6 +75,15 @@ struct uprobe { | |||
75 | struct arch_uprobe arch; | 75 | struct arch_uprobe arch; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | struct return_instance { | ||
79 | struct uprobe *uprobe; | ||
80 | unsigned long func; | ||
81 | unsigned long orig_ret_vaddr; /* original return address */ | ||
82 | bool chained; /* true, if instance is nested */ | ||
83 | |||
84 | struct return_instance *next; /* keep as stack */ | ||
85 | }; | ||
86 | |||
78 | /* | 87 | /* |
79 | * valid_vma: Verify if the specified vma is an executable vma | 88 | * valid_vma: Verify if the specified vma is an executable vma |
80 | * Relax restrictions while unregistering: vm_flags might have | 89 | * Relax restrictions while unregistering: vm_flags might have |
@@ -1317,6 +1326,7 @@ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) | |||
1317 | void uprobe_free_utask(struct task_struct *t) | 1326 | void uprobe_free_utask(struct task_struct *t) |
1318 | { | 1327 | { |
1319 | struct uprobe_task *utask = t->utask; | 1328 | struct uprobe_task *utask = t->utask; |
1329 | struct return_instance *ri, *tmp; | ||
1320 | 1330 | ||
1321 | if (!utask) | 1331 | if (!utask) |
1322 | return; | 1332 | return; |
@@ -1324,6 +1334,15 @@ void uprobe_free_utask(struct task_struct *t) | |||
1324 | if (utask->active_uprobe) | 1334 | if (utask->active_uprobe) |
1325 | put_uprobe(utask->active_uprobe); | 1335 | put_uprobe(utask->active_uprobe); |
1326 | 1336 | ||
1337 | ri = utask->return_instances; | ||
1338 | while (ri) { | ||
1339 | tmp = ri; | ||
1340 | ri = ri->next; | ||
1341 | |||
1342 | put_uprobe(tmp->uprobe); | ||
1343 | kfree(tmp); | ||
1344 | } | ||
1345 | |||
1327 | xol_free_insn_slot(t); | 1346 | xol_free_insn_slot(t); |
1328 | kfree(utask); | 1347 | kfree(utask); |
1329 | t->utask = NULL; | 1348 | t->utask = NULL; |
@@ -1371,6 +1390,65 @@ static unsigned long get_trampoline_vaddr(void) | |||
1371 | return trampoline_vaddr; | 1390 | return trampoline_vaddr; |
1372 | } | 1391 | } |
1373 | 1392 | ||
1393 | static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) | ||
1394 | { | ||
1395 | struct return_instance *ri; | ||
1396 | struct uprobe_task *utask; | ||
1397 | unsigned long orig_ret_vaddr, trampoline_vaddr; | ||
1398 | bool chained = false; | ||
1399 | |||
1400 | if (!get_xol_area()) | ||
1401 | return; | ||
1402 | |||
1403 | utask = get_utask(); | ||
1404 | if (!utask) | ||
1405 | return; | ||
1406 | |||
1407 | ri = kzalloc(sizeof(struct return_instance), GFP_KERNEL); | ||
1408 | if (!ri) | ||
1409 | goto fail; | ||
1410 | |||
1411 | trampoline_vaddr = get_trampoline_vaddr(); | ||
1412 | orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); | ||
1413 | if (orig_ret_vaddr == -1) | ||
1414 | goto fail; | ||
1415 | |||
1416 | /* | ||
1417 | * We don't want to keep trampoline address in stack, rather keep the | ||
1418 | * original return address of first caller thru all the consequent | ||
1419 | * instances. This also makes breakpoint unwrapping easier. | ||
1420 | */ | ||
1421 | if (orig_ret_vaddr == trampoline_vaddr) { | ||
1422 | if (!utask->return_instances) { | ||
1423 | /* | ||
1424 | * This situation is not possible. Likely we have an | ||
1425 | * attack from user-space. | ||
1426 | */ | ||
1427 | pr_warn("uprobe: unable to set uretprobe pid/tgid=%d/%d\n", | ||
1428 | current->pid, current->tgid); | ||
1429 | goto fail; | ||
1430 | } | ||
1431 | |||
1432 | chained = true; | ||
1433 | orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; | ||
1434 | } | ||
1435 | |||
1436 | atomic_inc(&uprobe->ref); | ||
1437 | ri->uprobe = uprobe; | ||
1438 | ri->func = instruction_pointer(regs); | ||
1439 | ri->orig_ret_vaddr = orig_ret_vaddr; | ||
1440 | ri->chained = chained; | ||
1441 | |||
1442 | /* add instance to the stack */ | ||
1443 | ri->next = utask->return_instances; | ||
1444 | utask->return_instances = ri; | ||
1445 | |||
1446 | return; | ||
1447 | |||
1448 | fail: | ||
1449 | kfree(ri); | ||
1450 | } | ||
1451 | |||
1374 | /* Prepare to single-step probed instruction out of line. */ | 1452 | /* Prepare to single-step probed instruction out of line. */ |
1375 | static int | 1453 | static int |
1376 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) | 1454 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) |
@@ -1527,6 +1605,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) | |||
1527 | { | 1605 | { |
1528 | struct uprobe_consumer *uc; | 1606 | struct uprobe_consumer *uc; |
1529 | int remove = UPROBE_HANDLER_REMOVE; | 1607 | int remove = UPROBE_HANDLER_REMOVE; |
1608 | bool need_prep = false; /* prepare return uprobe, when needed */ | ||
1530 | 1609 | ||
1531 | down_read(&uprobe->register_rwsem); | 1610 | down_read(&uprobe->register_rwsem); |
1532 | for (uc = uprobe->consumers; uc; uc = uc->next) { | 1611 | for (uc = uprobe->consumers; uc; uc = uc->next) { |
@@ -1537,9 +1616,16 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) | |||
1537 | WARN(rc & ~UPROBE_HANDLER_MASK, | 1616 | WARN(rc & ~UPROBE_HANDLER_MASK, |
1538 | "bad rc=0x%x from %pf()\n", rc, uc->handler); | 1617 | "bad rc=0x%x from %pf()\n", rc, uc->handler); |
1539 | } | 1618 | } |
1619 | |||
1620 | if (uc->ret_handler) | ||
1621 | need_prep = true; | ||
1622 | |||
1540 | remove &= rc; | 1623 | remove &= rc; |
1541 | } | 1624 | } |
1542 | 1625 | ||
1626 | if (need_prep && !remove) | ||
1627 | prepare_uretprobe(uprobe, regs); /* put bp at return */ | ||
1628 | |||
1543 | if (remove && uprobe->consumers) { | 1629 | if (remove && uprobe->consumers) { |
1544 | WARN_ON(!uprobe_is_active(uprobe)); | 1630 | WARN_ON(!uprobe_is_active(uprobe)); |
1545 | unapply_uprobe(uprobe, current->mm); | 1631 | unapply_uprobe(uprobe, current->mm); |
@@ -1658,7 +1744,11 @@ void uprobe_notify_resume(struct pt_regs *regs) | |||
1658 | */ | 1744 | */ |
1659 | int uprobe_pre_sstep_notifier(struct pt_regs *regs) | 1745 | int uprobe_pre_sstep_notifier(struct pt_regs *regs) |
1660 | { | 1746 | { |
1661 | if (!current->mm || !test_bit(MMF_HAS_UPROBES, ¤t->mm->flags)) | 1747 | if (!current->mm) |
1748 | return 0; | ||
1749 | |||
1750 | if (!test_bit(MMF_HAS_UPROBES, ¤t->mm->flags) && | ||
1751 | (!current->utask || !current->utask->return_instances)) | ||
1662 | return 0; | 1752 | return 0; |
1663 | 1753 | ||
1664 | set_thread_flag(TIF_UPROBE); | 1754 | set_thread_flag(TIF_UPROBE); |