 arch/x86/kernel/cpu/perf_counter.c | 29
 include/linux/perf_counter.h       | 28
 2 files changed, 23 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index ce1ae3f1f86c..76dfef23f789 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1555,9 +1555,9 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
  */
 
 static inline
-void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
+void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
-	if (entry->nr < MAX_STACK_DEPTH)
+	if (entry->nr < PERF_MAX_STACK_DEPTH)
 		entry->ip[entry->nr++] = ip;
 }
 
@@ -1602,22 +1602,10 @@ static const struct stacktrace_ops backtrace_ops = {
 static void
 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-	unsigned long bp;
-	char *stack;
-	int nr = entry->nr;
-
+	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	callchain_store(entry, regs->ip);
 
-	stack = ((char *)regs + sizeof(struct pt_regs));
-#ifdef CONFIG_FRAME_POINTER
-	get_bp(bp);
-#else
-	bp = 0;
-#endif
-
-	dump_trace(NULL, regs, (void *)&stack, bp, &backtrace_ops, entry);
-
-	entry->kernel = entry->nr - nr;
+	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
 /*
@@ -1669,16 +1657,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
 	struct stack_frame frame;
 	const void __user *fp;
-	int nr = entry->nr;
 
 	if (!user_mode(regs))
 		regs = task_pt_regs(current);
 
 	fp = (void __user *)regs->bp;
 
+	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->ip);
 
-	while (entry->nr < MAX_STACK_DEPTH) {
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		frame.next_frame = NULL;
 		frame.return_address = 0;
 
@@ -1691,8 +1679,6 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 		callchain_store(entry, frame.return_address);
 		fp = frame.next_frame;
 	}
-
-	entry->user = entry->nr - nr;
 }
 
 static void
@@ -1728,9 +1714,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 		entry = &__get_cpu_var(irq_entry);
 
 	entry->nr = 0;
-	entry->hv = 0;
-	entry->kernel = 0;
-	entry->user = 0;
 
 	perf_do_callchain(regs, entry);
 
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 0765e8e69843..e7e7e0242767 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -343,23 +343,22 @@ enum perf_event_type {
	 *	{ u64			nr;
	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
	 *
-	 *	{ u16			nr,
-	 *				hv,
-	 *				kernel,
-	 *				user;
+	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 * };
	 */
 };
 
-#define MAX_STACK_DEPTH		255
+enum perf_callchain_context {
+	PERF_CONTEXT_HV			= (__u64)-32,
+	PERF_CONTEXT_KERNEL		= (__u64)-128,
+	PERF_CONTEXT_USER		= (__u64)-512,
 
-struct perf_callchain_entry {
-	__u16	nr;
-	__u16	hv;
-	__u16	kernel;
-	__u16	user;
-	__u64	ip[MAX_STACK_DEPTH];
+	PERF_CONTEXT_GUEST		= (__u64)-2048,
+	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
+	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,
+
+	PERF_CONTEXT_MAX		= (__u64)-4095,
 };
 
 #ifdef __KERNEL__
@@ -381,6 +380,13 @@ struct perf_callchain_entry {
 #include <linux/pid_namespace.h>
 #include <asm/atomic.h>
 
+#define PERF_MAX_STACK_DEPTH		255
+
+struct perf_callchain_entry {
+	__u64				nr;
+	__u64				ip[PERF_MAX_STACK_DEPTH];
+};
+
 struct task_struct;
 
 /**
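
With this change the per-segment counters (hv, kernel, user) disappear from the sample record: a PERF_SAMPLE_CALLCHAIN body is now just a u64 count followed by u64 ips[nr], and segment boundaries are marked in-band by the PERF_CONTEXT_* values, which sit at the very top of the address range where no real instruction pointer is expected. The sketch below is not part of the patch; it is a minimal illustration of how a consumer might split such a record back into contexts once the u64 values have been copied out of the ring buffer. The walk_callchain() helper and the sample addresses are hypothetical, chosen only for the example.

#include <stdint.h>
#include <stdio.h>

/* Context markers mirroring the new enum perf_callchain_context. */
#define PERF_CONTEXT_HV      ((uint64_t)-32)
#define PERF_CONTEXT_KERNEL  ((uint64_t)-128)
#define PERF_CONTEXT_USER    ((uint64_t)-512)
#define PERF_CONTEXT_MAX     ((uint64_t)-4095)

/*
 * Walk one PERF_SAMPLE_CALLCHAIN body: nr entries of u64.
 * Values at or above PERF_CONTEXT_MAX are context markers; everything
 * else is an instruction pointer belonging to the most recent context.
 */
static void walk_callchain(const uint64_t *ips, uint64_t nr)
{
	const char *ctx = "unknown";

	for (uint64_t i = 0; i < nr; i++) {
		uint64_t ip = ips[i];

		if (ip >= PERF_CONTEXT_MAX) {
			if (ip == PERF_CONTEXT_KERNEL)
				ctx = "kernel";
			else if (ip == PERF_CONTEXT_USER)
				ctx = "user";
			else if (ip == PERF_CONTEXT_HV)
				ctx = "hv";
			continue;	/* marker, not a real address */
		}
		printf("%s: %#llx\n", ctx, (unsigned long long)ip);
	}
}

int main(void)
{
	/* Hypothetical sample: kernel frames followed by user frames. */
	uint64_t ips[] = {
		PERF_CONTEXT_KERNEL, 0xffffffff8100a1b2ULL,
		PERF_CONTEXT_USER,   0x0000000000400512ULL,
	};

	walk_callchain(ips, sizeof(ips) / sizeof(ips[0]));
	return 0;
}

Because the markers are ordinary array elements, contexts such as the PERF_CONTEXT_GUEST_* values defined above can be added later without changing the record layout; a reader that skips unknown markers keeps working.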