-rw-r--r--  include/linux/perf_counter.h |  10 ++
-rw-r--r--  kernel/fork.c                |   4 +-
-rw-r--r--  kernel/perf_counter.c        | 131 ++++++++++++++++++++++++-----
3 files changed, 126 insertions(+), 19 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 37d5541d74c..380247bdb91 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -277,6 +277,14 @@ enum perf_event_type {
 	PERF_EVENT_UNTHROTTLE		= 6,
 
 	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, ppid;
+	 * };
+	 */
+	PERF_EVENT_FORK			= 7,
+
+	/*
 	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
 	 * will be PERF_RECORD_*
 	 *
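The comment block above doubles as the wire format: a PERF_EVENT_FORK record is the generic header followed by two u32 fields. A minimal sketch of the struct a userspace reader might overlay on such a record (the struct name is illustrative, not part of the kernel ABI):

	#include <linux/types.h>
	#include <linux/perf_counter.h>	/* struct perf_event_header */

	/* Hypothetical userspace mirror of the layout documented above. */
	struct perf_fork_record {
		struct perf_event_header	header;	/* header.type == PERF_EVENT_FORK */
		__u32				pid;	/* the new child's pid */
		__u32				ppid;	/* pid of the child's real parent */
	};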
@@ -618,6 +626,7 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len,
 				unsigned long pgoff, struct file *file);
 
 extern void perf_counter_comm(struct task_struct *tsk);
+extern void perf_counter_fork(struct task_struct *tsk);
 
 extern void perf_counter_task_migration(struct task_struct *task, int cpu);
 
@@ -673,6 +682,7 @@ perf_counter_munmap(unsigned long addr, unsigned long len,
 			unsigned long pgoff, struct file *file) { }
 
 static inline void perf_counter_comm(struct task_struct *tsk) { }
+static inline void perf_counter_fork(struct task_struct *tsk) { }
 static inline void perf_counter_init(void) { }
 static inline void perf_counter_task_migration(struct task_struct *task,
 						int cpu) { }
diff --git a/kernel/fork.c b/kernel/fork.c
index b7d7a9f0bd7..f4466ca37ec 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1412,12 +1412,12 @@ long do_fork(unsigned long clone_flags,
 	if (clone_flags & CLONE_VFORK) {
 		p->vfork_done = &vfork;
 		init_completion(&vfork);
-	} else {
+	} else if (!(clone_flags & CLONE_VM)) {
 		/*
 		 * vfork will do an exec which will call
 		 * set_task_comm()
 		 */
-		perf_counter_comm(p);
+		perf_counter_fork(p);
 	}
 
 	audit_finish_fork(p);
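With this change, only a fork that gets its own address space emits the new event: vfork() and thread clones pass CLONE_VM and are skipped, and the vfork() case is still covered because the exec that follows calls set_task_comm(), which emits a COMM event. A small userspace sketch of the resulting behaviour, assuming a counter with one of the tracking bits is attached to the parent:

	#include <unistd.h>
	#include <sys/wait.h>

	int main(void)
	{
		pid_t child = fork();	/* new mm: do_fork() now calls
					 * perf_counter_fork(p), emitting
					 * a PERF_EVENT_FORK record */
		if (child == 0)
			_exit(0);
		waitpid(child, NULL, 0);

		/* pthread_create() and vfork() set CLONE_VM, so the
		 * else-if above skips them; vfork()'s subsequent exec
		 * reports via PERF_EVENT_COMM instead. */
		return 0;
	}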
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0bb03f15a5b..78c58623a0d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -40,9 +40,9 @@ static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
 static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_tracking __read_mostly;
-static atomic_t nr_munmap_tracking __read_mostly;
-static atomic_t nr_comm_tracking __read_mostly;
+static atomic_t nr_mmap_counters __read_mostly;
+static atomic_t nr_munmap_counters __read_mostly;
+static atomic_t nr_comm_counters __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
@@ -1447,11 +1447,11 @@ static void free_counter(struct perf_counter *counter)
 
 	atomic_dec(&nr_counters);
 	if (counter->attr.mmap)
-		atomic_dec(&nr_mmap_tracking);
+		atomic_dec(&nr_mmap_counters);
 	if (counter->attr.munmap)
-		atomic_dec(&nr_munmap_tracking);
+		atomic_dec(&nr_munmap_counters);
 	if (counter->attr.comm)
-		atomic_dec(&nr_comm_tracking);
+		atomic_dec(&nr_comm_counters);
 
 	if (counter->destroy)
 		counter->destroy(counter);
@@ -2476,6 +2476,105 @@ static void perf_counter_output(struct perf_counter *counter,
 }
 
 /*
+ * fork tracking
+ */
+
+struct perf_fork_event {
+	struct task_struct	*task;
+
+	struct {
+		struct perf_event_header	header;
+
+		u32				pid;
+		u32				ppid;
+	} event;
+};
+
+static void perf_counter_fork_output(struct perf_counter *counter,
+				     struct perf_fork_event *fork_event)
+{
+	struct perf_output_handle handle;
+	int size = fork_event->event.header.size;
+	struct task_struct *task = fork_event->task;
+	int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+	if (ret)
+		return;
+
+	fork_event->event.pid = perf_counter_pid(counter, task);
+	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+
+	perf_output_put(&handle, fork_event->event);
+	perf_output_end(&handle);
+}
+
+static int perf_counter_fork_match(struct perf_counter *counter)
+{
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap)
+		return 1;
+
+	return 0;
+}
+
+static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
+				  struct perf_fork_event *fork_event)
+{
+	struct perf_counter *counter;
+
+	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+		if (perf_counter_fork_match(counter))
+			perf_counter_fork_output(counter, fork_event);
+	}
+	rcu_read_unlock();
+}
+
+static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_counter_context *ctx;
+
+	cpuctx = &get_cpu_var(perf_cpu_context);
+	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	put_cpu_var(perf_cpu_context);
+
+	rcu_read_lock();
+	/*
+	 * doesn't really matter which of the child contexts the
+	 * events ends up in.
+	 */
+	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (ctx)
+		perf_counter_fork_ctx(ctx, fork_event);
+	rcu_read_unlock();
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	struct perf_fork_event fork_event;
+
+	if (!atomic_read(&nr_comm_counters) &&
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_munmap_counters))
+		return;
+
+	fork_event = (struct perf_fork_event){
+		.task = task,
+		.event = {
+			.header = {
+				.type = PERF_EVENT_FORK,
+				.size = sizeof(fork_event.event),
+			},
+		},
+	};
+
+	perf_counter_fork_event(&fork_event);
+}
+
+/*
  * comm tracking
  */
 
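Note that the patch introduces no separate attr.fork bit: perf_counter_fork_match() above delivers FORK records to any counter that enabled comm, mmap, or munmap tracking. A hedged sketch of opening such a counter with this era's sys_perf_counter_open() syscall (via raw syscall(2), since libc has no wrapper; event selection elided):

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	int open_fork_tracking_counter(pid_t pid)
	{
		struct perf_counter_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.comm = 1;	/* any of .comm/.mmap/.munmap makes
				 * perf_counter_fork_match() return 1 */

		/* attr.type/attr.config (the event to count) left out here */
		return syscall(__NR_perf_counter_open, &attr, pid,
			       -1 /* any cpu */, -1 /* no group */, 0);
	}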
@@ -2511,11 +2610,9 @@ static void perf_counter_comm_output(struct perf_counter *counter,
 	perf_output_end(&handle);
 }
 
-static int perf_counter_comm_match(struct perf_counter *counter,
-				   struct perf_comm_event *comm_event)
+static int perf_counter_comm_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm &&
-	    comm_event->event.header.type == PERF_EVENT_COMM)
+	if (counter->attr.comm)
 		return 1;
 
 	return 0;
@@ -2531,7 +2628,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_comm_match(counter, comm_event))
+		if (perf_counter_comm_match(counter))
 			perf_counter_comm_output(counter, comm_event);
 	}
 	rcu_read_unlock();
@@ -2570,7 +2667,7 @@ void perf_counter_comm(struct task_struct *task)
 {
 	struct perf_comm_event comm_event;
 
-	if (!atomic_read(&nr_comm_tracking))
+	if (!atomic_read(&nr_comm_counters))
 		return;
 
 	comm_event = (struct perf_comm_event){
@@ -2708,7 +2805,7 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
 {
 	struct perf_mmap_event mmap_event;
 
-	if (!atomic_read(&nr_mmap_tracking))
+	if (!atomic_read(&nr_mmap_counters))
 		return;
 
 	mmap_event = (struct perf_mmap_event){
@@ -2729,7 +2826,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 {
 	struct perf_mmap_event mmap_event;
 
-	if (!atomic_read(&nr_munmap_tracking))
+	if (!atomic_read(&nr_munmap_counters))
 		return;
 
 	mmap_event = (struct perf_mmap_event){
@@ -3427,11 +3524,11 @@ done:
 
 	atomic_inc(&nr_counters);
 	if (counter->attr.mmap)
-		atomic_inc(&nr_mmap_tracking);
+		atomic_inc(&nr_mmap_counters);
 	if (counter->attr.munmap)
-		atomic_inc(&nr_munmap_tracking);
+		atomic_inc(&nr_munmap_counters);
 	if (counter->attr.comm)
-		atomic_inc(&nr_comm_tracking);
+		atomic_inc(&nr_comm_counters);
 
 	return counter;
 }
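For completeness, a sketch of how a consumer could pick the new records out of the counter's mmap ring buffer; ring-buffer bookkeeping is omitted, and fork_record is an illustrative name matching the layout documented in the enum comment:

	#include <stdio.h>
	#include <linux/types.h>
	#include <linux/perf_counter.h>

	struct fork_record {			/* illustrative, not a kernel struct */
		struct perf_event_header	header;
		__u32				pid, ppid;
	};

	/* Invoked for each record pulled out of the mmap data pages. */
	static void handle_record(struct perf_event_header *hdr)
	{
		if (hdr->type == PERF_EVENT_FORK) {
			struct fork_record *f = (struct fork_record *)hdr;
			printf("fork: pid=%u ppid=%u\n", f->pid, f->ppid);
		}
	}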