Diffstat (limited to 'arch/sh/mm/fault.c')
-rw-r--r--	arch/sh/mm/fault.c	161
1 file changed, 62 insertions(+), 99 deletions(-)

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8f99ae..716ebf568af2 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,19 @@ extern void die(const char *,struct pt_regs *,long);
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			      unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+					unsigned long writeaccess,
+					unsigned long address)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page;
+	int si_code;
+	siginfo_t info;
+
+	trace_hardirqs_on();
+	local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	tsk = current;
 	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+
+	if (unlikely(address >= TASK_SIZE)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = get_TTB() + offset;
+		pgd_k = swapper_pg_dir + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_area_nosemaphore;
+		set_pud(pud, *pud_k);
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+
+		return;
+	}
 
 	/*
 	 * If we're in an interrupt or have no user
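
The hunk above adds lazy synchronization of the faulting task's top-level page table with the kernel's reference table (swapper_pg_dir): rather than updating every task's table whenever a kernel mapping changes, the fault path copies the one missing top-level entry on demand, and treats a fault where the entry is already present (or absent from the reference table as well) as a genuine bad area. A minimal userspace model of that decision logic, with invented names and plain arrays standing in for page-directory entries:

/* Userspace model of the lazy top-level sync: each "task" keeps a
 * private top-level table whose kernel slots may lag behind the
 * reference table. The fault path copies the single missing entry
 * on demand. All names here are invented for illustration. */
#include <stdio.h>

#define ENTRIES 8

static long reference[ENTRIES] = { 0, 0, 0, 0, 11, 0, 13, 0 };
static long task_table[ENTRIES];	/* starts out of sync (empty) */

/* Returns 0 if the miss was repaired from the reference table,
 * -1 if the access is a genuine bad area. */
static int sync_entry(long *task_tbl, const long *ref, int idx)
{
	if (task_tbl[idx])
		return -1;	/* entry present yet we faulted: bad area */
	if (!ref[idx])
		return -1;	/* mapped nowhere: bad area */
	task_tbl[idx] = ref[idx];	/* lazy copy, like set_pgd() above */
	return 0;
}

int main(void)
{
	printf("slot 4: %s\n", sync_entry(task_table, reference, 4) ? "bad area" : "synced");
	printf("slot 5: %s\n", sync_entry(task_table, reference, 5) ? "bad area" : "synced");
	return 0;
}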
@@ -65,6 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * we can handle it..
 	 */
 good_area:
+	si_code = SEGV_ACCERR;
 	if (writeaccess) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -104,10 +151,13 @@ survive:
 bad_area:
 	up_read(&mm->mmap_sem);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
-		tsk->thread.address = address;
-		tsk->thread.error_code = writeaccess;
-		force_sig(SIGSEGV, tsk);
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, tsk);
 		return;
 	}
 
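Because the handler now fills in a siginfo and uses force_sig_info() rather than bare force_sig(), a userspace SIGSEGV handler installed with SA_SIGINFO receives the faulting address in si_addr and can tell an unmapped address (SEGV_MAPERR) from a permission failure on a mapped one (SEGV_ACCERR), neither of which was filled in on this path before. A self-contained sketch; printf() is not async-signal-safe and is used here only for brevity:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* not async-signal-safe; acceptable for a demo only */
	printf("SIGSEGV at %p, si_code=%d (%s)\n",
	       info->si_addr, info->si_code,
	       info->si_code == SEGV_MAPERR ? "unmapped" : "access");
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0x10 = 1;	/* write to an unmapped address */
	return 0;
}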
@@ -127,11 +177,9 @@ no_context:
 	printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n", address);
 	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	asm volatile("mov.l	%1, %0"
-		     : "=r" (page)
-		     : "m" (__m(MMU_TTB)));
+	page = (unsigned long)get_TTB();
 	if (page) {
-		page = ((unsigned long *) page)[address >> 22];
+		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
 		printk(KERN_ALERT "*pde = %08lx\n", page);
 		if (page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
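The hunk above also drops a hardcoded shift, indexing the page directory with address >> PGDIR_SHIFT instead of address >> 22. Assuming 4 KiB pages and the two-level layout, where each top-level entry covers 4 MiB, PGDIR_SHIFT evaluates to the same 22, so the printed *pde is unchanged; the constant now simply tracks the configuration. The arithmetic, as a sketch:

#include <stdio.h>

#define PGDIR_SHIFT	22	/* assumed: two-level layout, 4 MiB per pde */

int main(void)
{
	unsigned long address = 0xc0801234UL;	/* arbitrary example */

	printf("pde index    : %lu\n", address >> PGDIR_SHIFT);
	printf("pde coverage : %lu MiB\n", (1UL << PGDIR_SHIFT) >> 20);
	return 0;
}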
@@ -166,98 +214,13 @@ do_sigbus:
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.address = address;
-	tsk->thread.error_code = writeaccess;
-	tsk->thread.trap_no = 14;
-	force_sig(SIGBUS, tsk);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX		P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
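
The do_sigbus path gets the same treatment, reporting BUS_ADRERR together with the faulting address instead of a bare SIGBUS. One portable way to observe this from userspace is to touch a page of a file mapping that lies beyond end-of-file; a sketch with error handling trimmed for brevity:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void bus_handler(int sig, siginfo_t *info, void *ctx)
{
	printf("SIGBUS at %p, si_code=%d (BUS_ADRERR=%d)\n",
	       info->si_addr, info->si_code, (int)BUS_ADRERR);
	_exit(0);
}

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char path[] = "/tmp/sigbus-XXXXXX";
	struct sigaction sa = { 0 };
	int fd = mkstemp(path);
	char *p;

	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	ftruncate(fd, 1);	/* file is one byte long */
	p = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	unlink(path);

	p[pg] = 1;	/* second page has no file backing: SIGBUS */
	return 0;
}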