Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 285
 1 file changed, 162 insertions(+), 123 deletions(-)

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb753b8c..199ed4771315 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
+static atomic_t nr_task_counters __read_mostly;
 
 /*
  * perf counter paranoia level:
@@ -146,6 +147,28 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+	if (ctx->parent_ctx) {
+		put_ctx(ctx->parent_ctx);
+		ctx->parent_ctx = NULL;
+	}
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+	u64 id = counter->id;
+
+	if (counter->parent)
+		id = counter->parent->id;
+
+	return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
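
Note (editor's annotation, not part of the patch): primary_counter_id()
centralizes a lookup that several output paths below used to open-code:
an inherited (per-child-task) counter reports the id of the counter it
was cloned from, so every record belonging to one logical event carries
one stable id. A self-contained toy sketch of that resolution rule
(types and names are illustrative, not the kernel's):

	#include <assert.h>
	#include <stddef.h>

	typedef unsigned long long u64;

	struct toy_counter {
		u64 id;
		struct toy_counter *parent;	/* non-NULL for inherited clones */
	};

	/* same rule as primary_counter_id() above */
	static u64 toy_primary_id(const struct toy_counter *c)
	{
		return c->parent ? c->parent->id : c->id;
	}

	int main(void)
	{
		struct toy_counter parent = { .id = 1, .parent = NULL };
		struct toy_counter child  = { .id = 7, .parent = &parent };

		assert(toy_primary_id(&parent) == 1);
		assert(toy_primary_id(&child)  == 1);	/* reports parent's id */
		return 0;
	}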
@@ -1288,7 +1311,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1329,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
 	if (!sample_period)
 		sample_period = 1;
 
-	perf_log_period(counter, sample_period);
-
 	hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1483,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 	/*
 	 * Unclone this context if we enabled any counter.
 	 */
-	if (enabled && ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
-		ctx->parent_ctx = NULL;
-	}
+	if (enabled)
+		unclone_ctx(ctx);
 
 	spin_unlock(&ctx->lock);
 
@@ -1526,7 +1544,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_counter_context *parent_ctx;
 	struct perf_counter_context *ctx;
 	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
@@ -1586,11 +1603,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
-		parent_ctx = ctx->parent_ctx;
-		if (parent_ctx) {
-			put_ctx(parent_ctx);
-			ctx->parent_ctx = NULL; /* no longer a clone */
-		}
+		unclone_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -1642,6 +1655,8 @@ static void free_counter(struct perf_counter *counter)
 			atomic_dec(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_dec(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_dec(&nr_task_counters);
 	}
 
 	if (counter->destroy)
@@ -1676,6 +1691,18 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static u64 perf_counter_read_tree(struct perf_counter *counter)
+{
+	struct perf_counter *child;
+	u64 total = 0;
+
+	total += perf_counter_read(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		total += perf_counter_read(child);
+
+	return total;
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
@@ -1695,7 +1722,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read(counter);
+	values[0] = perf_counter_read_tree(counter);
 	n = 1;
 	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 		values[n++] = counter->total_time_enabled +
@@ -1704,7 +1731,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
 	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = counter->id;
+		values[n++] = primary_counter_id(counter);
 	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
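
Note (editor's annotation, not part of the patch): with these two hunks a
read() on a counter fd returns the sum over the whole inheritance tree and
the parent's id. A hedged sketch of the buffer userspace gets back when all
three read_format bits are set, in the order values[] is filled above
(struct name is illustrative):

	#include <stdint.h>

	struct counter_read_buf {
		uint64_t value;		/* perf_counter_read_tree(): counter + children */
		uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
		uint64_t id;		/* PERF_FORMAT_ID: primary_counter_id() */
	};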
@@ -1811,8 +1838,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 		counter->attr.sample_freq = value;
 	} else {
-		perf_log_period(counter, value);
-
 		counter->attr.sample_period = value;
 		counter->hw.sample_period = value;
 	}
@@ -2661,10 +2686,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ID)
 		header.size += sizeof(u64);
 
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		header.size += sizeof(u64);
+
 	if (sample_type & PERF_SAMPLE_CPU) {
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
+		cpu_entry.reserved = 0;
 	}
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2703,7 +2732,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, data->addr);
 
-	if (sample_type & PERF_SAMPLE_ID)
+	if (sample_type & PERF_SAMPLE_ID) {
+		u64 id = primary_counter_id(counter);
+
+		perf_output_put(&handle, id);
+	}
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
 		perf_output_put(&handle, counter->id);
 
 	if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2761,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 			if (sub != counter)
 				sub->pmu->read(sub);
 
-			group_entry.id = sub->id;
+			group_entry.id = primary_counter_id(sub);
 			group_entry.counter = atomic64_read(&sub->count);
 
 			perf_output_put(&handle, group_entry);
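
Note (editor's annotation, not part of the patch): the net effect of the
perf_counter_output() hunks is that PERF_SAMPLE_ID (and group-member ids)
now resolve to the primary, i.e. parent, id, while the new
PERF_SAMPLE_STREAM_ID still carries the per-instance counter->id. A sketch
of the two fields as they appear inside a sample record (struct name
illustrative; the surrounding ip/tid/time/addr/cpu/period fields are
omitted):

	#include <stdint.h>

	struct sample_ids {
		uint64_t id;		/* PERF_SAMPLE_ID: shared by all inherited clones */
		uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID: unique per counter */
	};

Tools can thus aggregate by id and still tell the emitting counter
instance apart via stream_id.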
@@ -2786,15 +2821,8 @@ perf_counter_read_event(struct perf_counter *counter,
 	}
 
 	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		u64 id;
-
 		event.header.size += sizeof(u64);
-		if (counter->parent)
-			id = counter->parent->id;
-		else
-			id = counter->id;
-
-		event.format[i++] = id;
+		event.format[i++] = primary_counter_id(counter);
 	}
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2806,10 +2834,12 @@ perf_counter_read_event(struct perf_counter *counter,
 }
 
 /*
- * fork tracking
+ * task tracking -- fork/exit
+ *
+ * enabled by: attr.comm | attr.mmap | attr.task
  */
 
-struct perf_fork_event {
+struct perf_task_event {
 	struct task_struct *task;
 
 	struct {
@@ -2817,37 +2847,42 @@ struct perf_fork_event {
 
 		u32 pid;
 		u32 ppid;
+		u32 tid;
+		u32 ptid;
 	} event;
 };
 
-static void perf_counter_fork_output(struct perf_counter *counter,
-				     struct perf_fork_event *fork_event)
+static void perf_counter_task_output(struct perf_counter *counter,
+				     struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size = fork_event->event.header.size;
-	struct task_struct *task = fork_event->task;
+	int size = task_event->event.header.size;
+	struct task_struct *task = task_event->task;
 	int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
 	if (ret)
 		return;
 
-	fork_event->event.pid = perf_counter_pid(counter, task);
-	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.pid = perf_counter_pid(counter, task);
+	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
 
-	perf_output_put(&handle, fork_event->event);
+	task_event->event.tid = perf_counter_tid(counter, task);
+	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+
+	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
 }
 
-static int perf_counter_fork_match(struct perf_counter *counter)
+static int perf_counter_task_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm || counter->attr.mmap)
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
 		return 1;
 
 	return 0;
 }
 
-static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
-				  struct perf_fork_event *fork_event)
+static void perf_counter_task_ctx(struct perf_counter_context *ctx,
+				  struct perf_task_event *task_event)
 {
 	struct perf_counter *counter;
 
@@ -2856,19 +2891,19 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_fork_match(counter))
-			perf_counter_fork_output(counter, fork_event);
+		if (perf_counter_task_match(counter))
+			perf_counter_task_output(counter, task_event);
 	}
 	rcu_read_unlock();
 }
 
-static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
-	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
@@ -2878,29 +2913,40 @@ static void perf_counter_fork_event(struct perf_fork_event *fork_event)
 	 */
 	ctx = rcu_dereference(current->perf_counter_ctxp);
 	if (ctx)
-		perf_counter_fork_ctx(ctx, fork_event);
+		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-void perf_counter_fork(struct task_struct *task)
+static void perf_counter_task(struct task_struct *task, int new)
 {
-	struct perf_fork_event fork_event;
+	struct perf_task_event task_event;
 
 	if (!atomic_read(&nr_comm_counters) &&
-	    !atomic_read(&nr_mmap_counters))
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_task_counters))
 		return;
 
-	fork_event = (struct perf_fork_event){
+	task_event = (struct perf_task_event){
 		.task = task,
 		.event = {
 			.header = {
-				.type = PERF_EVENT_FORK,
-				.size = sizeof(fork_event.event),
+				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
+				.misc = 0,
+				.size = sizeof(task_event.event),
 			},
+			/* .pid  */
+			/* .ppid */
+			/* .tid  */
+			/* .ptid */
 		},
 	};
 
-	perf_counter_fork_event(&fork_event);
+	perf_counter_task_event(&task_event);
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	perf_counter_task(task, 1);
 }
 
 /*
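
Note (editor's annotation, not part of the patch): a userspace-side mirror
of the record now emitted for both PERF_EVENT_FORK and PERF_EVENT_EXIT,
matching the anonymous struct above; pid/tid values are resolved through
perf_counter_pid()/perf_counter_tid() (record struct name is illustrative):

	#include <stdint.h>

	struct perf_event_header {	/* as in linux/perf_counter.h */
		uint32_t type;		/* PERF_EVENT_FORK or PERF_EVENT_EXIT */
		uint16_t misc;
		uint16_t size;
	};

	struct task_event_record {
		struct perf_event_header header;
		uint32_t pid,  ppid;	/* task and its real_parent */
		uint32_t tid,  ptid;	/* the newly added thread-level ids */
	};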
@@ -2968,8 +3014,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 	unsigned int size;
-	char *comm = comm_event->task->comm;
+	char comm[TASK_COMM_LEN];
 
+	memset(comm, 0, sizeof(comm));
+	strncpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
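
Note (editor's annotation, not part of the patch): the copy matters because
the record length is rounded up to a multiple of u64, so up to seven bytes
past the NUL are emitted; taking them from a zeroed local buffer rather
than straight from task->comm keeps that padding deterministic. A tiny
worked example of the rounding, assuming the usual ALIGN() definition:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

	int main(void)
	{
		/* "bash" needs 5 bytes with its NUL; the record carries 8,
		 * i.e. 3 padding bytes that must be zero */
		assert(ALIGN(strlen("bash") + 1, sizeof(uint64_t)) == 8);
		return 0;
	}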
@@ -3004,8 +3052,16 @@ void perf_counter_comm(struct task_struct *task)
 
 	comm_event = (struct perf_comm_event){
 		.task = task,
+		/* .comm      */
+		/* .comm_size */
 		.event = {
-			.header = { .type = PERF_EVENT_COMM, },
+			.header = {
+				.type = PERF_EVENT_COMM,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 		},
 	};
 
@@ -3088,8 +3144,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	const char *name;
 
+	memset(tmp, 0, sizeof(tmp));
+
 	if (file) {
-		buf = kzalloc(PATH_MAX, GFP_KERNEL);
+		/*
+		 * d_path works from the end of the buffer backwards, so we
+		 * need to add enough zero bytes after the string to handle
+		 * the 64bit alignment we do later.
+		 */
+		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
 		if (!buf) {
 			name = strncpy(tmp, "//enomem", sizeof(tmp));
 			goto got_name;
@@ -3100,9 +3163,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 			goto got_name;
 		}
 	} else {
-		name = arch_vma_name(mmap_event->vma);
-		if (name)
+		if (arch_vma_name(mmap_event->vma)) {
+			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+				       sizeof(tmp));
 			goto got_name;
+		}
 
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
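
Note (editor's annotation, not part of the patch): both hunks above fix the
same class of out-of-bounds read. d_path() builds the name at the tail of
the buffer it is handed, and arch_vma_name() may return a string with
nothing mapped after it, so the later 64-bit-aligned copy could read past
the end; hence the sizeof(u64) of zeroed slack and the copy into the
zero-filled tmp[]. A worked example with illustrative numbers:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

	int main(void)
	{
		/* a 14-byte name (incl. NUL) at the very end of the buffer is
		 * copied out as 16 bytes -- 2 bytes past the string, which the
		 * extra zeroed slack now covers */
		size_t n = strlen("/usr/bin/perf") + 1;		/* 14 */
		assert(ALIGN(n, sizeof(uint64_t)) == 16);
		return 0;
	}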
@@ -3147,8 +3212,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 
 	mmap_event = (struct perf_mmap_event){
 		.vma = vma,
+		/* .file_name */
+		/* .file_size */
 		.event = {
-			.header = { .type = PERF_EVENT_MMAP, },
+			.header = {
+				.type = PERF_EVENT_MMAP,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 			.start = vma->vm_start,
 			.len = vma->vm_end - vma->vm_start,
 			.pgoff = vma->vm_pgoff,
@@ -3159,49 +3232,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 }
 
 /*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-	struct perf_event_header	header;
-	u64				time;
-	u64				id;
-	u64				period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-	struct perf_output_handle handle;
-	struct freq_event event;
-	int ret;
-
-	if (counter->hw.sample_period == period)
-		return;
-
-	if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-		return;
-
-	event = (struct freq_event) {
-		.header = {
-			.type = PERF_EVENT_PERIOD,
-			.misc = 0,
-			.size = sizeof(event),
-		},
-		.time = sched_clock(),
-		.id = counter->id,
-		.period = period,
-	};
-
-	ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-	if (ret)
-		return;
-
-	perf_output_put(&handle, event);
-	perf_output_end(&handle);
-}
-
-/*
  * IRQ throttle logging
  */
 
@@ -3214,16 +3244,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 		struct perf_event_header	header;
 		u64				time;
 		u64				id;
+		u64				stream_id;
 	} throttle_event = {
 		.header = {
-			.type = PERF_EVENT_THROTTLE + 1,
+			.type = PERF_EVENT_THROTTLE,
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
 		.time = sched_clock(),
-		.id = counter->id,
+		.id = primary_counter_id(counter),
+		.stream_id = counter->id,
 	};
 
+	if (enable)
+		throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
 	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
 	if (ret)
 		return;
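
Note (editor's annotation, not part of the patch): besides adding
stream_id, this hunk fixes the record type -- the old code always emitted
PERF_EVENT_THROTTLE + 1 (the unthrottle value) regardless of the enable
flag. The resulting record, mirrored for userspace (struct names
illustrative):

	#include <stdint.h>

	struct perf_event_header {	/* as in linux/perf_counter.h */
		uint32_t type;		/* PERF_EVENT_THROTTLE or PERF_EVENT_UNTHROTTLE */
		uint16_t misc;
		uint16_t size;
	};

	struct throttle_event_record {
		struct perf_event_header header;
		uint64_t time;		/* sched_clock() timestamp */
		uint64_t id;		/* primary (parent) counter id */
		uint64_t stream_id;	/* this counter instance's own id */
	};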
@@ -3671,7 +3706,7 @@ static const struct pmu perf_ops_task_clock = {
 void perf_tpcounter_event(int event_id)
 {
 	struct perf_sample_data data = {
-		.regs = get_irq_regs();
+		.regs = get_irq_regs(),
 		.addr = 0,
 	};
 
@@ -3687,16 +3722,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	ftrace_profile_disable(perf_event_id(&counter->attr));
+	ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = perf_event_id(&counter->attr);
-	int ret;
-
-	ret = ftrace_profile_enable(event_id);
-	if (ret)
+	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
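
Note (editor's annotation, not part of the patch): with the perf_event_id()
helper gone, the ftrace event id now travels in attr.config unmodified. A
hedged sketch of the userspace side under that assumption (the helper name
is hypothetical):

	#include <string.h>
	#include <linux/perf_counter.h>	/* struct perf_counter_attr */

	/* build an attr for a tracepoint counter; the id comes from
	 * debugfs, e.g. .../tracing/events/<subsys>/<event>/id */
	static struct perf_counter_attr tp_attr(unsigned long long tracepoint_id)
	{
		struct perf_counter_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type   = PERF_TYPE_TRACEPOINT;
		attr.config = tracepoint_id;
		attr.size   = sizeof(attr);
		return attr;
	}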
@@ -3874,6 +3905,8 @@ done:
 			atomic_inc(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_inc(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_inc(&nr_task_counters);
 	}
 
 	return counter;
@@ -4235,8 +4268,10 @@ void perf_counter_exit_task(struct task_struct *child)
 	struct perf_counter_context *child_ctx;
 	unsigned long flags;
 
-	if (likely(!child->perf_counter_ctxp))
+	if (likely(!child->perf_counter_ctxp)) {
+		perf_counter_task(child, 0);
 		return;
+	}
 
 	local_irq_save(flags);
 	/*
@@ -4254,18 +4289,22 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	spin_lock(&child_ctx->lock);
+	/*
+	 * If this context is a clone; unclone it so it can't get
+	 * swapped to another process while we're removing all
+	 * the counters from it.
+	 */
+	unclone_ctx(child_ctx);
+	spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+	/*
+	 * Report the task dead after unscheduling the counters so that we
+	 * won't get any samples after PERF_EVENT_EXIT. We can however still
+	 * get a few PERF_EVENT_READ events.
+	 */
+	perf_counter_task(child, 0);
+
 	child->perf_counter_ctxp = NULL;
-	if (child_ctx->parent_ctx) {
-		/*
-		 * This context is a clone; unclone it so it can't get
-		 * swapped to another process while we're removing all
-		 * the counters from it.
-		 */
-		put_ctx(child_ctx->parent_ctx);
-		child_ctx->parent_ctx = NULL;
-	}
-	spin_unlock(&child_ctx->lock);
-	local_irq_restore(flags);
 
 	/*
 	 * We can recurse on the same lock type through: