author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-04 11:08:58 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-06-04 11:51:38 -0400
commit     d99e9446200c1ffab28cb0e39b76c34a2bfafd06
tree       10cd2f67f3bffaf8dcec79c197689f440faf9265 /kernel/perf_counter.c
parent     60313ebed739b331e8e61079da27a11ee3b73a30
perf_counter: Remove munmap stuff
In the name of keeping it simple, only track mmap events. Userspace
will have to remove old overlapping maps when it encounters them.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
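
The userspace side this implies can be sketched as follows: a consumer of
the ring buffer keeps its own table of mappings and, on each
PERF_EVENT_MMAP record, evicts any stored range that the new mapping
overlaps before inserting it. This is a minimal illustration, not code
from the patch; the names (map_entry, handle_mmap_event, overlaps) are
invented for the sketch, and a real tool would likely trim partial
overlaps rather than drop whole entries.

#include <stdio.h>
#include <stdlib.h>

/*
 * One entry per live mapping, as reconstructed from PERF_EVENT_MMAP
 * records. A real tool would also keep the file name and pgoff.
 */
struct map_entry {
	unsigned long		start;
	unsigned long		len;
	struct map_entry	*next;
};

static struct map_entry *maps;

static int overlaps(const struct map_entry *m,
		    unsigned long start, unsigned long len)
{
	return m->start < start + len && start < m->start + m->len;
}

/* Called for every PERF_EVENT_MMAP record read from the ring buffer. */
static void handle_mmap_event(unsigned long start, unsigned long len)
{
	struct map_entry **p = &maps, *m;

	/* Evict every previously recorded map the new range overlaps. */
	while ((m = *p) != NULL) {
		if (overlaps(m, start, len)) {
			*p = m->next;
			free(m);
		} else {
			p = &m->next;
		}
	}

	m = malloc(sizeof(*m));
	if (!m)
		return;
	m->start = start;
	m->len   = len;
	m->next  = maps;
	maps     = m;
}

int main(void)
{
	handle_mmap_event(0x1000, 0x2000);	/* map A: [0x1000, 0x3000) */
	handle_mmap_event(0x2000, 0x1000);	/* overlaps A, so A is evicted */

	for (struct map_entry *m = maps; m; m = m->next)
		printf("map: %#lx-%#lx\n", m->start, m->start + m->len);
	return 0;
}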
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 38 +++-----------------------------------
 1 file changed, 3 insertions(+), 35 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 78c58623a0dd..195712e20d07 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -41,7 +41,6 @@ static int perf_overcommit __read_mostly = 1;
 
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
-static atomic_t nr_munmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
@@ -1448,8 +1447,6 @@ static void free_counter(struct perf_counter *counter)
 	atomic_dec(&nr_counters);
 	if (counter->attr.mmap)
 		atomic_dec(&nr_mmap_counters);
-	if (counter->attr.munmap)
-		atomic_dec(&nr_munmap_counters);
 	if (counter->attr.comm)
 		atomic_dec(&nr_comm_counters);
 
@@ -2510,7 +2507,7 @@ static void perf_counter_fork_output(struct perf_counter *counter,
 
 static int perf_counter_fork_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap)
+	if (counter->attr.comm || counter->attr.mmap)
 		return 1;
 
 	return 0;
@@ -2557,8 +2554,7 @@ void perf_counter_fork(struct task_struct *task)
 	struct perf_fork_event fork_event;
 
 	if (!atomic_read(&nr_comm_counters) &&
-	    !atomic_read(&nr_mmap_counters) &&
-	    !atomic_read(&nr_munmap_counters))
+	    !atomic_read(&nr_mmap_counters))
 		return;
 
 	fork_event = (struct perf_fork_event){
@@ -2722,12 +2718,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 static int perf_counter_mmap_match(struct perf_counter *counter,
 				   struct perf_mmap_event *mmap_event)
 {
-	if (counter->attr.mmap &&
-	    mmap_event->event.header.type == PERF_EVENT_MMAP)
-		return 1;
-
-	if (counter->attr.munmap &&
-	    mmap_event->event.header.type == PERF_EVENT_MUNMAP)
+	if (counter->attr.mmap)
 		return 1;
 
 	return 0;
@@ -2821,27 +2812,6 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
 	perf_counter_mmap_event(&mmap_event);
 }
 
-void perf_counter_munmap(unsigned long addr, unsigned long len,
-			 unsigned long pgoff, struct file *file)
-{
-	struct perf_mmap_event mmap_event;
-
-	if (!atomic_read(&nr_munmap_counters))
-		return;
-
-	mmap_event = (struct perf_mmap_event){
-		.file	= file,
-		.event	= {
-			.header = { .type = PERF_EVENT_MUNMAP, },
-			.start	= addr,
-			.len	= len,
-			.pgoff	= pgoff,
-		},
-	};
-
-	perf_counter_mmap_event(&mmap_event);
-}
-
 /*
  * Log sample_period changes so that analyzing tools can re-normalize the
  * event flow.
@@ -3525,8 +3495,6 @@ done:
 	atomic_inc(&nr_counters);
 	if (counter->attr.mmap)
 		atomic_inc(&nr_mmap_counters);
-	if (counter->attr.munmap)
-		atomic_inc(&nr_munmap_counters);
 	if (counter->attr.comm)
 		atomic_inc(&nr_comm_counters);
 
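
For completeness, the consumer-visible effect: address-space tracking is
now requested with the attr mmap bit alone. This assumes the full commit
also drops the munmap bit from struct perf_counter_attr, which this
diffstat, limited to kernel/perf_counter.c, does not show. A hypothetical
attr setup in the API of this era; the config and sample_period values
are placeholders, not taken from the patch:

#include <linux/perf_counter.h>	/* in-tree header of this era */

/*
 * Hypothetical setup: with the munmap bit gone, mirroring the task's
 * address space only needs attr.mmap. Values below are placeholders.
 */
struct perf_counter_attr attr = {
	.type		= PERF_TYPE_SOFTWARE,
	.config		= PERF_COUNT_CPU_CLOCK,
	.sample_period	= 100000,
	.mmap		= 1,	/* emit PERF_EVENT_MMAP records */
	.comm		= 1,	/* emit PERF_EVENT_COMM records */
	/* .munmap = 1 is no longer available after this patch */
};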