Diffstat (limited to 'kernel/perf_counter.c')

 kernel/perf_counter.c | 311 +++++++++++++++++++++++++++++++---------------------
 1 file changed, 183 insertions(+), 128 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d55a50da2347..868102172aa4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
+static atomic_t nr_task_counters __read_mostly;
 
 /*
  * perf counter paranoia level:
@@ -146,6 +147,28 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+	if (ctx->parent_ctx) {
+		put_ctx(ctx->parent_ctx);
+		ctx->parent_ctx = NULL;
+	}
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+	u64 id = counter->id;
+
+	if (counter->parent)
+		id = counter->parent->id;
+
+	return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
@@ -1081,7 +1104,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx,
 		__perf_counter_sync_stat(counter, next_counter);
 
 		counter = list_next_entry(counter, event_entry);
-		next_counter = list_next_entry(counter, event_entry);
+		next_counter = list_next_entry(next_counter, event_entry);
 	}
 }
 
@@ -1288,7 +1311,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1329,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
 	if (!sample_period)
 		sample_period = 1;
 
-	perf_log_period(counter, sample_period);
-
 	hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1483,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 	/*
 	 * Unclone this context if we enabled any counter.
 	 */
-	if (enabled && ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
-		ctx->parent_ctx = NULL;
-	}
+	if (enabled)
+		unclone_ctx(ctx);
 
 	spin_unlock(&ctx->lock);
 
@@ -1526,7 +1544,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_counter_context *parent_ctx;
 	struct perf_counter_context *ctx;
 	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
@@ -1586,11 +1603,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
-		parent_ctx = ctx->parent_ctx;
-		if (parent_ctx) {
-			put_ctx(parent_ctx);
-			ctx->parent_ctx = NULL; /* no longer a clone */
-		}
+		unclone_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -1642,6 +1655,8 @@ static void free_counter(struct perf_counter *counter)
 			atomic_dec(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_dec(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_dec(&nr_task_counters);
 	}
 
 	if (counter->destroy)
@@ -1676,6 +1691,18 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static u64 perf_counter_read_tree(struct perf_counter *counter)
+{
+	struct perf_counter *child;
+	u64 total = 0;
+
+	total += perf_counter_read(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		total += perf_counter_read(child);
+
+	return total;
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
@@ -1695,7 +1722,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read(counter);
+	values[0] = perf_counter_read_tree(counter);
 	n = 1;
 	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 		values[n++] = counter->total_time_enabled +
@@ -1704,7 +1731,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
 	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = counter->id;
+		values[n++] = primary_counter_id(counter);
 	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
@@ -1811,8 +1838,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 		counter->attr.sample_freq = value;
 	} else {
-		perf_log_period(counter, value);
-
 		counter->attr.sample_period = value;
 		counter->hw.sample_period = value;
 	}
@@ -2020,7 +2045,7 @@ fail:
 
 static void perf_mmap_free_page(unsigned long addr)
 {
-	struct page *page = virt_to_page(addr);
+	struct page *page = virt_to_page((void *)addr);
 
 	page->mapping = NULL;
 	__free_page(page);
@@ -2621,6 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		u64 counter;
 	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
+	struct perf_tracepoint_record *tp;
 	int callchain_size = 0;
 	u64 time;
 	struct {
@@ -2661,10 +2687,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ID)
 		header.size += sizeof(u64);
 
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		header.size += sizeof(u64);
+
 	if (sample_type & PERF_SAMPLE_CPU) {
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
+		cpu_entry.reserved = 0;
 	}
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2685,6 +2715,11 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(u64);
 	}
 
+	if (sample_type & PERF_SAMPLE_TP_RECORD) {
+		tp = data->private;
+		header.size += tp->size;
+	}
+
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -2703,7 +2738,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, data->addr);
 
-	if (sample_type & PERF_SAMPLE_ID)
+	if (sample_type & PERF_SAMPLE_ID) {
+		u64 id = primary_counter_id(counter);
+
+		perf_output_put(&handle, id);
+	}
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
 		perf_output_put(&handle, counter->id);
 
 	if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2767,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		if (sub != counter)
 			sub->pmu->read(sub);
 
-		group_entry.id = sub->id;
+		group_entry.id = primary_counter_id(sub);
 		group_entry.counter = atomic64_read(&sub->count);
 
 		perf_output_put(&handle, group_entry);
@@ -2742,6 +2783,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
+	if (sample_type & PERF_SAMPLE_TP_RECORD)
+		perf_output_copy(&handle, tp->record, tp->size);
+
 	perf_output_end(&handle);
 }
 
@@ -2786,15 +2830,8 @@ perf_counter_read_event(struct perf_counter *counter,
 	}
 
 	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		u64 id;
-
 		event.header.size += sizeof(u64);
-		if (counter->parent)
-			id = counter->parent->id;
-		else
-			id = counter->id;
-
-		event.format[i++] = id;
+		event.format[i++] = primary_counter_id(counter);
 	}
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2806,10 +2843,12 @@ perf_counter_read_event(struct perf_counter *counter,
 }
 
 /*
- * fork tracking
+ * task tracking -- fork/exit
+ *
+ * enabled by: attr.comm | attr.mmap | attr.task
  */
 
-struct perf_fork_event {
+struct perf_task_event {
 	struct task_struct *task;
 
 	struct {
@@ -2817,37 +2856,42 @@ struct perf_fork_event {
 
 		u32	pid;
 		u32	ppid;
+		u32	tid;
+		u32	ptid;
 	} event;
 };
 
-static void perf_counter_fork_output(struct perf_counter *counter,
-				     struct perf_fork_event *fork_event)
+static void perf_counter_task_output(struct perf_counter *counter,
+				     struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size = fork_event->event.header.size;
-	struct task_struct *task = fork_event->task;
+	int size = task_event->event.header.size;
+	struct task_struct *task = task_event->task;
 	int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
 	if (ret)
 		return;
 
-	fork_event->event.pid = perf_counter_pid(counter, task);
-	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.pid = perf_counter_pid(counter, task);
+	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+
+	task_event->event.tid = perf_counter_tid(counter, task);
+	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
 
-	perf_output_put(&handle, fork_event->event);
+	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
 }
 
-static int perf_counter_fork_match(struct perf_counter *counter)
+static int perf_counter_task_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm || counter->attr.mmap)
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
 		return 1;
 
 	return 0;
 }
 
-static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
-				  struct perf_fork_event *fork_event)
+static void perf_counter_task_ctx(struct perf_counter_context *ctx,
+				  struct perf_task_event *task_event)
 {
 	struct perf_counter *counter;
 
@@ -2856,19 +2900,19 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_fork_match(counter))
-			perf_counter_fork_output(counter, fork_event);
+		if (perf_counter_task_match(counter))
+			perf_counter_task_output(counter, task_event);
 	}
 	rcu_read_unlock();
 }
 
-static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
-	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
@@ -2878,29 +2922,40 @@ static void perf_counter_fork_event(struct perf_fork_event *fork_event)
 	 */
 	ctx = rcu_dereference(current->perf_counter_ctxp);
 	if (ctx)
-		perf_counter_fork_ctx(ctx, fork_event);
+		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-void perf_counter_fork(struct task_struct *task)
+static void perf_counter_task(struct task_struct *task, int new)
 {
-	struct perf_fork_event fork_event;
+	struct perf_task_event task_event;
 
 	if (!atomic_read(&nr_comm_counters) &&
-	    !atomic_read(&nr_mmap_counters))
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_task_counters))
 		return;
 
-	fork_event = (struct perf_fork_event){
+	task_event = (struct perf_task_event){
 		.task	= task,
 		.event	= {
 			.header = {
-				.type = PERF_EVENT_FORK,
-				.size = sizeof(fork_event.event),
+				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
+				.misc = 0,
+				.size = sizeof(task_event.event),
 			},
+			/* .pid  */
+			/* .ppid */
+			/* .tid  */
+			/* .ptid */
 		},
 	};
 
-	perf_counter_fork_event(&fork_event);
+	perf_counter_task_event(&task_event);
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	perf_counter_task(task, 1);
 }
 
 /*
@@ -2968,8 +3023,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 	unsigned int size;
-	char *comm = comm_event->task->comm;
+	char comm[TASK_COMM_LEN];
 
+	memset(comm, 0, sizeof(comm));
+	strncpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3004,8 +3061,16 @@ void perf_counter_comm(struct task_struct *task)
 
 	comm_event = (struct perf_comm_event){
 		.task	= task,
+		/* .comm      */
+		/* .comm_size */
 		.event	= {
-			.header = { .type = PERF_EVENT_COMM, },
+			.header = {
+				.type = PERF_EVENT_COMM,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 		},
 	};
 
@@ -3088,8 +3153,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	const char *name;
 
+	memset(tmp, 0, sizeof(tmp));
+
 	if (file) {
-		buf = kzalloc(PATH_MAX, GFP_KERNEL);
+		/*
+		 * d_path works from the end of the buffer backwards, so we
+		 * need to add enough zero bytes after the string to handle
+		 * the 64bit alignment we do later.
+		 */
+		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
 		if (!buf) {
 			name = strncpy(tmp, "//enomem", sizeof(tmp));
 			goto got_name;
@@ -3100,9 +3172,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 			goto got_name;
 		}
 	} else {
-		name = arch_vma_name(mmap_event->vma);
-		if (name)
+		if (arch_vma_name(mmap_event->vma)) {
+			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+				       sizeof(tmp));
 			goto got_name;
+		}
 
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3221,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 
 	mmap_event = (struct perf_mmap_event){
 		.vma	= vma,
+		/* .file_name */
+		/* .file_size */
 		.event	= {
-			.header = { .type = PERF_EVENT_MMAP, },
+			.header = {
+				.type = PERF_EVENT_MMAP,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 			.start	= vma->vm_start,
 			.len	= vma->vm_end - vma->vm_start,
 			.pgoff	= vma->vm_pgoff,
@@ -3159,49 +3241,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 }
 
 /*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-	struct perf_event_header	header;
-	u64				time;
-	u64				id;
-	u64				period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-	struct perf_output_handle handle;
-	struct freq_event event;
-	int ret;
-
-	if (counter->hw.sample_period == period)
-		return;
-
-	if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-		return;
-
-	event = (struct freq_event) {
-		.header = {
-			.type = PERF_EVENT_PERIOD,
-			.misc = 0,
-			.size = sizeof(event),
-		},
-		.time = sched_clock(),
-		.id = counter->id,
-		.period = period,
-	};
-
-	ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-	if (ret)
-		return;
-
-	perf_output_put(&handle, event);
-	perf_output_end(&handle);
-}
-
-/*
  * IRQ throttle logging
  */
 
@@ -3214,16 +3253,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 		struct perf_event_header	header;
 		u64				time;
 		u64				id;
+		u64				stream_id;
 	} throttle_event = {
 		.header = {
-			.type = PERF_EVENT_THROTTLE + 1,
+			.type = PERF_EVENT_THROTTLE,
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
 		.time = sched_clock(),
-		.id = counter->id,
+		.id = primary_counter_id(counter),
+		.stream_id = counter->id,
 	};
 
+	if (enable)
+		throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
 	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
 	if (ret)
 		return;
@@ -3668,17 +3712,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size)
 {
+	struct perf_tracepoint_record tp = {
+		.size = entry_size,
+		.record = record,
+	};
+
 	struct perf_sample_data data = {
-		.regs = get_irq_regs();
-		.addr = 0,
+		.regs = get_irq_regs(),
+		.addr = addr,
+		.private = &tp,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
@@ -3687,16 +3738,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	ftrace_profile_disable(perf_event_id(&counter->attr));
+	ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = perf_event_id(&counter->attr);
-	int ret;
-
-	ret = ftrace_profile_enable(event_id);
-	if (ret)
+	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
@@ -3874,6 +3921,8 @@ done:
 			atomic_inc(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_inc(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_inc(&nr_task_counters);
 	}
 
 	return counter;
@@ -4235,8 +4284,10 @@ void perf_counter_exit_task(struct task_struct *child)
 	struct perf_counter_context *child_ctx;
 	unsigned long flags;
 
-	if (likely(!child->perf_counter_ctxp))
+	if (likely(!child->perf_counter_ctxp)) {
+		perf_counter_task(child, 0);
 		return;
+	}
 
 	local_irq_save(flags);
 	/*
@@ -4254,18 +4305,22 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	spin_lock(&child_ctx->lock);
+	/*
+	 * If this context is a clone; unclone it so it can't get
+	 * swapped to another process while we're removing all
+	 * the counters from it.
+	 */
+	unclone_ctx(child_ctx);
+	spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+	/*
+	 * Report the task dead after unscheduling the counters so that we
+	 * won't get any samples after PERF_EVENT_EXIT. We can however still
+	 * get a few PERF_EVENT_READ events.
+	 */
+	perf_counter_task(child, 0);
+
 	child->perf_counter_ctxp = NULL;
-	if (child_ctx->parent_ctx) {
-		/*
-		 * This context is a clone; unclone it so it can't get
-		 * swapped to another process while we're removing all
-		 * the counters from it.
-		 */
-		put_ctx(child_ctx->parent_ctx);
-		child_ctx->parent_ctx = NULL;
-	}
-	spin_unlock(&child_ctx->lock);
-	local_irq_restore(flags);
 
 	/*
 	 * We can recurse on the same lock type through: