author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-03-13 07:21:33 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-06 03:29:37 -0400
commit		7dd1fcc258b65da718f01e4684a7b9244501a9fb (patch)
tree		4fb864f493b9f1d47bedbc3c97b9de7df572d6ec
parent		15dbf27cc18559a14e99609f78678aa86b9c6ff1 (diff)
perf_counter: provide pagefault software events
We use the generic software counter infrastructure to provide page fault events.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
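For context, a minimal sketch (not part of this commit) of what the per-architecture hook looks like after this change. The handler name and surrounding skeleton below are illustrative assumptions; only the perf_swcounter_event() call, with its arguments, matches what the patch adds to the real fault handlers:

#include <linux/perf_counter.h>

/*
 * Sketch only: an architecture page-fault entry point reporting one
 * PERF_COUNT_PAGE_FAULTS event to the generic software counter code.
 * example_do_page_fault() is a made-up name; the perf_swcounter_event()
 * call mirrors the one added by this patch (count = 1, addr = 0,
 * current register state passed via regs).
 */
static void example_do_page_fault(struct pt_regs *regs, unsigned long address)
{
	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);

	/* ... architecture-specific fault handling continues here ... */
}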
-rw-r--r--	arch/powerpc/mm/fault.c	3
-rw-r--r--	arch/x86/mm/fault.c	3
-rw-r--r--	kernel/perf_counter.c	53
3 files changed, 9 insertions(+), 50 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 76993941cac9..eda5b0ca4af2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/perf_counter.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		die("Weird page fault", regs, SIGSEGV);
 	}
 
+	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a03b7279efa0..c8725752b6cd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -27,6 +27,7 @@
 #include <linux/tty.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/perf_counter.h>
 
 #include <asm-generic/sections.h>
 
@@ -1044,6 +1045,8 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
+	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
+
 	/*
 	 * If we're in an interrupt, have no user context or are running
 	 * in an atomic region then we must not take the fault:
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index eeb1b46cf707..1773c5d7427d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1607,57 +1607,10 @@ static const struct hw_perf_counter_ops perf_ops_task_clock = {
  * Software counter: page faults
  */
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-#define cpu_page_faults()	__get_cpu_var(vm_event_states).event[PGFAULT]
-#else
-#define cpu_page_faults()	0
-#endif
-
-static u64 get_page_faults(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->maj_flt + curr->min_flt;
-	return cpu_page_faults();
-}
-
-static void page_faults_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_page_faults(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void page_faults_perf_counter_read(struct perf_counter *counter)
-{
-	page_faults_perf_counter_update(counter);
-}
-
-static int page_faults_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
-	return 0;
-}
-
-static void page_faults_perf_counter_disable(struct perf_counter *counter)
-{
-	page_faults_perf_counter_update(counter);
-}
-
 static const struct hw_perf_counter_ops perf_ops_page_faults = {
-	.enable		= page_faults_perf_counter_enable,
-	.disable	= page_faults_perf_counter_disable,
-	.read		= page_faults_perf_counter_read,
+	.enable		= perf_swcounter_enable,
+	.disable	= perf_swcounter_disable,
+	.read		= perf_swcounter_read,
 };
 
 /*
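The net effect of the hunk above is that the page-fault counter no longer carries its own bookkeeping; it simply plugs into the generic swcounter callbacks. A hedged sketch of how a further software event could be wired up the same way; the ops-struct name below is invented for illustration, only the callback assignments are taken from the patch:

/*
 * Illustrative only: any additional software counter can now reuse the
 * generic callbacks exactly as perf_ops_page_faults does above.
 * perf_ops_example is a made-up name; the three assignments mirror the
 * ones introduced by this patch.
 */
static const struct hw_perf_counter_ops perf_ops_example = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};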