Diffstat (limited to 'arch/sh/mm/fault_32.c')
-rw-r--r--	arch/sh/mm/fault_32.c	196
1 file changed, 121 insertions(+), 75 deletions(-)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 71925946f1e1..781b413ff82d 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -2,7 +2,7 @@
  * Page fault handler for SH with an MMU.
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2008 Paul Mundt
+ * Copyright (C) 2003 - 2009 Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  *  Copyright (C) 1995 Linus Torvalds
@@ -25,18 +25,91 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
 {
 	int ret = 0;
 
-#ifdef CONFIG_KPROBES
-	if (!user_mode(regs)) {
+	if (kprobes_built_in() && !user_mode(regs)) {
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, trap))
 			ret = 1;
 		preempt_enable();
 	}
-#endif
 
 	return ret;
 }
 
+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+{
+	unsigned index = pgd_index(address);
+	pgd_t *pgd_k;
+	pud_t *pud, *pud_k;
+	pmd_t *pmd, *pmd_k;
+
+	pgd += index;
+	pgd_k = init_mm.pgd + index;
+
+	if (!pgd_present(*pgd_k))
+		return NULL;
+
+	pud = pud_offset(pgd, address);
+	pud_k = pud_offset(pgd_k, address);
+	if (!pud_present(*pud_k))
+		return NULL;
+
+	pmd = pmd_offset(pud, address);
+	pmd_k = pmd_offset(pud_k, address);
+	if (!pmd_present(*pmd_k))
+		return NULL;
+
+	if (!pmd_present(*pmd))
+		set_pmd(pmd, *pmd_k);
+	else {
+		/*
+		 * The page tables are fully synchronised so there must
+		 * be another reason for the fault. Return NULL here to
+		 * signal that we have not taken care of the fault.
+		 */
+		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+		return NULL;
+	}
+
+	return pmd_k;
+}
+
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+	pgd_t *pgd_k;
+	pmd_t *pmd_k;
+	pte_t *pte_k;
+
+	/* Make sure we are in vmalloc/module/P3 area: */
+	if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
+		return -1;
+
+	/*
+	 * Synchronize this task's top level page-table
+	 * with the 'reference' page table.
+	 *
+	 * Do _not_ use "current" here. We might be inside
+	 * an interrupt in the middle of a task switch..
+	 */
+	pgd_k = get_TTB();
+	pmd_k = vmalloc_sync_one(pgd_k, address);
+	if (!pmd_k)
+		return -1;
+
+	pte_k = pte_offset_kernel(pmd_k, address);
+	if (!pte_present(*pte_k))
+		return -1;
+
+	return 0;
+}
+
+static int fault_in_kernel_space(unsigned long address)
+{
+	return address >= TASK_SIZE;
+}
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -46,6 +119,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 					unsigned long writeaccess,
 					unsigned long address)
 {
+	unsigned long vec;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
@@ -53,59 +127,30 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	int fault;
 	siginfo_t info;
 
-	/*
-	 * We don't bother with any notifier callbacks here, as they are
-	 * all handled through the __do_page_fault() fast-path.
-	 */
-
 	tsk = current;
+	mm = tsk->mm;
 	si_code = SEGV_MAPERR;
+	vec = lookup_exception_vector();
 
-	if (unlikely(address >= TASK_SIZE)) {
-		/*
-		 * Synchronize this task's top level page-table
-		 * with the 'reference' page table.
-		 *
-		 * Do _not_ use "tsk" here. We might be inside
-		 * an interrupt in the middle of a task switch..
-		 */
-		int offset = pgd_index(address);
-		pgd_t *pgd, *pgd_k;
-		pud_t *pud, *pud_k;
-		pmd_t *pmd, *pmd_k;
-
-		pgd = get_TTB() + offset;
-		pgd_k = swapper_pg_dir + offset;
-
-		if (!pgd_present(*pgd)) {
-			if (!pgd_present(*pgd_k))
-				goto bad_area_nosemaphore;
-			set_pgd(pgd, *pgd_k);
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 */
+	if (unlikely(fault_in_kernel_space(address))) {
+		if (vmalloc_fault(address) >= 0)
 			return;
-		}
 
-		pud = pud_offset(pgd, address);
-		pud_k = pud_offset(pgd_k, address);
-
-		if (!pud_present(*pud)) {
-			if (!pud_present(*pud_k))
-				goto bad_area_nosemaphore;
-			set_pud(pud, *pud_k);
+		if (notify_page_fault(regs, vec))
 			return;
-		}
 
-		pmd = pmd_offset(pud, address);
-		pmd_k = pmd_offset(pud_k, address);
-		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
-			goto bad_area_nosemaphore;
-		set_pmd(pmd, *pmd_k);
-
-		return;
+		goto bad_area_nosemaphore;
 	}
 
-	mm = tsk->mm;
-
-	if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
+	if (unlikely(notify_page_fault(regs, vec)))
 		return;
 
 	/* Only enable interrupts if they were on before the fault */
@@ -115,8 +160,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/*
-	 * If we're in an interrupt or have no user
-	 * context, we must not take the fault..
+	 * If we're in an interrupt, have no user context or are running
+	 * in an atomic region then we must not take the fault:
 	 */
 	if (in_atomic() || !mm)
 		goto no_context;
@@ -132,10 +177,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area;
 	if (expand_stack(vma, address))
 		goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
 good_area:
 	si_code = SEGV_ACCERR;
 	if (writeaccess) {
@@ -173,10 +219,10 @@ survive:
 	up_read(&mm->mmap_sem);
 	return;
 
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
 bad_area:
 	up_read(&mm->mmap_sem);
 
@@ -272,16 +318,15 @@ do_sigbus:
 /*
  * Called with interrupts disabled.
  */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
+asmlinkage int __kprobes
+handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
+	       unsigned long address)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
-	int ret = 1;
 
 	/*
 	 * We don't take page faults for P1, P2, and parts of P4, these
@@ -292,40 +337,41 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 		pgd = pgd_offset_k(address);
 	} else {
 		if (unlikely(address >= TASK_SIZE || !current->mm))
-			goto out;
+			return 1;
 
 		pgd = pgd_offset(current->mm, address);
 	}
 
 	pud = pud_offset(pgd, address);
 	if (pud_none_or_clear_bad(pud))
-		goto out;
+		return 1;
 	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
-		goto out;
+		return 1;
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto out;
+		return 1;
 	if (unlikely(writeaccess && !pte_write(entry)))
-		goto out;
+		return 1;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
 	entry = pte_mkyoung(entry);
 
+	set_pte(pte, entry);
+
 #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
 	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
+	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
+	 * the case of an initial page write exception, so we need to
+	 * flush it in order to avoid potential TLB entry duplication.
 	 */
-	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+	if (writeaccess == 2)
+		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
 
-	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
 
-	ret = 0;
-out:
-	return ret;
+	return 0;
 }
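
For reference, a minimal user-space sketch of the decision order this patch gives do_page_fault() for kernel-space addresses: try vmalloc_fault() first, then the kprobes notifier, then fall through to bad_area_nosemaphore. The constant values and the stubbed helper bodies below are assumptions for illustration only, not the real SH definitions from this file.

/*
 * Illustrative, stand-alone approximation of the kernel-space fast path.
 * TASK_SIZE, VMALLOC_START and P3_ADDR_MAX are stand-in values; the stub
 * helpers only model the return-value contract of the real functions.
 */
#include <stdio.h>

#define TASK_SIZE	0x7c000000UL	/* stand-in, not the SH value */
#define VMALLOC_START	0xc0000000UL	/* stand-in */
#define P3_ADDR_MAX	0xe0000000UL	/* stand-in */

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/* Stub: pretend the reference page table always resolves a vmalloc fault. */
static int vmalloc_fault(unsigned long address)
{
	if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
		return -1;
	return 0;
}

/* Stub for notify_page_fault(): no kprobe handled the fault. */
static int notify_page_fault(unsigned long vec)
{
	(void)vec;
	return 0;
}

static const char *classify(unsigned long address, unsigned long vec)
{
	if (fault_in_kernel_space(address)) {
		if (vmalloc_fault(address) >= 0)
			return "handled: vmalloc/module area synced from init_mm.pgd";
		if (notify_page_fault(vec))
			return "handled: kprobe fault handler";
		return "bad_area_nosemaphore";
	}
	if (notify_page_fault(vec))
		return "handled: kprobe fault handler";
	return "user-space fault: take mmap_sem and walk the VMAs";
}

int main(void)
{
	printf("%s\n", classify(0xc0001000UL, 0));	/* vmalloc area */
	printf("%s\n", classify(0x00400000UL, 0));	/* user address */
	return 0;
}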