Diffstat (limited to 'arch/sh/mm'):

 arch/sh/mm/fault_32.c    | 61
 arch/sh/mm/init.c        |  4
 arch/sh/mm/tlbflush_64.c | 15
 3 files changed, 45 insertions(+), 35 deletions(-)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index cc8ddbdf3d7a..71925946f1e1 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,12 +15,28 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
-#include <linux/marker.h>
+#include <linux/perf_counter.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+	int ret = 0;
+
+#ifdef CONFIG_KPROBES
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, trap))
+			ret = 1;
+		preempt_enable();
+	}
+#endif
+
+	return ret;
+}
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -87,13 +103,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		return;
 	}
 
+	mm = tsk->mm;
+
+	if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
+		return;
+
 	/* Only enable interrupts if they were on before the fault */
-	if ((regs->sr & SR_IMASK) != SR_IMASK) {
-		trace_hardirqs_on();
+	if ((regs->sr & SR_IMASK) != SR_IMASK)
 		local_irq_enable();
-	}
 
-	mm = tsk->mm;
+	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -141,10 +160,15 @@ survive:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-	else
+		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				     regs, address);
+	} else {
 		tsk->min_flt++;
+		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				     regs, address);
+	}
 
 	up_read(&mm->mmap_sem);
 	return;
@@ -245,22 +269,6 @@ do_sigbus:
 		goto no_context;
 }
 
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
-	int ret = 0;
-
-#ifdef CONFIG_KPROBES
-	if (!user_mode(regs)) {
-		preempt_disable();
-		if (kprobe_running() && kprobe_fault_handler(regs, trap))
-			ret = 1;
-		preempt_enable();
-	}
-#endif
-
-	return ret;
-}
-
 /*
  * Called with interrupts disabled.
  */
@@ -273,12 +281,7 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
-	int ret = 0;
-
-	if (notify_page_fault(regs, lookup_exception_vector()))
-		goto out;
-
-	ret = 1;
+	int ret = 1;
 
 	/*
 	 * We don't take page faults for P1, P2, and parts of P4, these
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ee8e6bbe882c..fe532aeaa16d 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -70,7 +70,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	}
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
-	flush_tlb_one(get_asid(), addr);
+	local_flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -177,10 +177,8 @@ void __init paging_init(void)
 
 	free_area_init_nodes(max_zone_pfns);
 
-#ifdef CONFIG_SUPERH32
 	/* Set up the uncached fixmap */
 	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
-#endif
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index fcbb6e135cef..3ce40ea34824 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2000, 2001  Paolo Alberelli
  * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003  Paul Mundt
+ * Copyright (C) 2003 - 2009 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/perf_counter.h>
 #include <linux/interrupt.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -115,6 +116,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	/* Not an IO address, so reenable interrupts */
 	local_irq_enable();
 
+	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
@@ -195,10 +198,16 @@ survive:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+
+	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-	else
+		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				     regs, address);
+	} else {
 		tsk->min_flt++;
+		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				     regs, address);
+	}
 
 	/* If we get here, the page fault has been handled.  Do the TLB refill
 	   now from the newly-setup PTE, to avoid having to fault again right