about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-03-23 13:22:07 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-06 03:30:25 -0400
commit96f6d4444302bb2ea2cf409529eef816462f6ce0 (patch)
tree34ae6944b4a8e6db89c50dba5a3a8b9da3315122
parentf4a2deb4860497f4332cf6a1acddab3dd628ddf0 (diff)
perf_counter: avoid recursion
Tracepoint events like lock_acquire and software counters like
pagefaults can recurse into the perf counter code again; avoid that.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090323172417.152096433@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--include/linux/perf_counter.h7
-rw-r--r--kernel/perf_counter.c26
2 files changed, 33 insertions, 0 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 56099e52970d..18dc17d0a61c 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -328,6 +328,13 @@ struct perf_cpu_context {
 	int			active_oncpu;
 	int			max_pertask;
 	int			exclusive;
+
+	/*
+	 * Recursion avoidance:
+	 *
+	 * task, softirq, irq, nmi context
+	 */
+	int			recursion[4];
 };
 
 /*
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index ca14fc41ccdf..ce34bff07bda 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/vmstat.h>
 #include <linux/rculist.h>
+#include <linux/hardirq.h>
 
 #include <asm/irq_regs.h>
 
@@ -1532,10 +1533,31 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 	rcu_read_unlock();
 }
 
+static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
+{
+	if (in_nmi())
+		return &cpuctx->recursion[3];
+
+	if (in_irq())
+		return &cpuctx->recursion[2];
+
+	if (in_softirq())
+		return &cpuctx->recursion[1];
+
+	return &cpuctx->recursion[0];
+}
+
 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
 				   u64 nr, int nmi, struct pt_regs *regs)
 {
 	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+	int *recursion = perf_swcounter_recursion_context(cpuctx);
+
+	if (*recursion)
+		goto out;
+
+	(*recursion)++;
+	barrier();
 
 	perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
 	if (cpuctx->task_ctx) {
@@ -1543,6 +1565,10 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event,
 			nr, nmi, regs);
 	}
 
+	barrier();
+	(*recursion)--;
+
+out:
 	put_cpu_var(perf_cpu_context);
 }
 