Diffstat (limited to 'arch/sh/mm/tlbflush_64.c')
-rw-r--r--	arch/sh/mm/tlbflush_64.c	64
1 file changed, 27 insertions(+), 37 deletions(-)
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index fcbb6e135cef..7f5810f5dfdc 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2000, 2001 Paolo Alberelli
  * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2009 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/perf_event.h>
 #include <linux/interrupt.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -35,7 +36,7 @@ extern void die(const char *,struct pt_regs *,long);
 
 static inline void print_prots(pgprot_t prot)
 {
-	printk("prot is 0x%08lx\n",pgprot_val(prot));
+	printk("prot is 0x%016llx\n",pgprot_val(prot));
 
 	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
 	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
@@ -115,6 +116,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	/* Not an IO address, so reenable interrupts */
 	local_irq_enable();
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
@@ -186,7 +189,6 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-survive:
 	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
@@ -195,10 +197,16 @@ survive:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+
+	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-	else
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+			      regs, address);
+	} else {
 		tsk->min_flt++;
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+			      regs, address);
+	}
 
 	/* If we get here, the page fault has been handled.  Do the TLB refill
 	   now from the newly-setup PTE, to avoid having to fault again right
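
For readability, this is how the fault-accounting branch introduced by the hunk above reads once its added and context lines are reassembled. It only compiles inside the kernel's do_page_fault(), so treat it as a sketch of the result rather than a standalone example:

	/* Account the fault against the task and the perf software counters. */
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}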
@@ -285,22 +293,11 @@ no_context:
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	if (is_global_init(current)) {
-		panic("INIT out of memory\n");
-		yield();
-		goto survive;
-	}
-	printk("fault:Out of memory\n");
 	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	printk("fault:Do sigbus\n");
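
Assembled from the added lines of the hunk above, the reworked out-of-memory path now defers OOM policy to the generic pagefault_out_of_memory() helper instead of the old open-coded yield/kill logic; again a kernel-context sketch, not standalone code:

out_of_memory:
	up_read(&mm->mmap_sem);
	/* Kernel-mode faults still fall through to the no_context fixup. */
	if (!user_mode(regs))
		goto no_context;
	/* User-mode faults let the core VM pick an OOM victim or wait. */
	pagefault_out_of_memory();
	return;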
@@ -320,22 +317,6 @@ do_sigbus:
 		goto no_context;
 	}
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	/*
-	 * This appears to get called once for every pte entry that gets
-	 * established => I don't think it's efficient to try refilling the
-	 * TLBs with the pages - some may not get accessed even.  Also, for
-	 * executable pages, it is impossible to determine reliably here which
-	 * TLB they should be mapped into (or both even).
-	 *
-	 * So, just do nothing here and handle faults on demand.  In the
-	 * TLBMISS handling case, the refill is now done anyway after the pte
-	 * has been fixed up, so that deals with most useful cases.
-	 */
-}
-
 void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long long match, pteh=0, lpage;
@@ -344,7 +325,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	/*
 	 * Sign-extend based on neff.
 	 */
-	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
+	lpage = neff_sign_extend(page);
 	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
 	match |= lpage;
 
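
The neff_sign_extend() call above replaces the open-coded NEFF_SIGN/NEFF_MASK expression removed by this hunk. Presumably the helper wraps exactly that sign-extension; a minimal sketch under that assumption (NEFF_SIGN and NEFF_MASK come from the SH address-space headers, and the real helper may differ in detail):

	/* Sketch: sign-extend a virtual address based on the implemented NEFF bits. */
	static inline unsigned long long neff_sign_extend_sketch(unsigned long long va)
	{
		return (va & NEFF_SIGN) ? (va | NEFF_MASK) : va;
	}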
@@ -473,3 +454,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	/* FIXME: Optimize this later.. */
 	flush_tlb_all();
 }
+
+void __flush_tlb_global(void)
+{
+	flush_tlb_all();
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}