path: root/arch/m32r/mm/fault.c
author    Arnd Bergmann <arnd@arndb.de>  2018-03-07 15:36:19 -0500
committer Arnd Bergmann <arnd@arndb.de>  2018-03-09 17:20:00 -0500
commit    553b085c2075f6a4a2591108554f830fa61e881f (patch)
tree      68d63911f2c12e0fb9fa23498df9300442a88f92  /arch/m32r/mm/fault.c
parent    fd8773f9f544955f6f47dc2ac3ab85ad64376b7f (diff)
arch: remove m32r port
The Mitsubishi/Renesas m32r architecture has been around for many years, but
the Linux port has been obsolete for a very long time as well, with the last
significant updates done for linux-2.6.14.

While some m32r microcontrollers are still being marketed by Renesas, those
are apparently no longer possible to support, mainly due to the lack of an
external memory interface.

Hirokazu Takata was the maintainer until the architecture got marked
Orphaned in 2014.

Link: http://www.linux-m32r.org/
Link: https://www.renesas.com/en-eu/products/microcontrollers-microprocessors/m32r.html
Cc: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/m32r/mm/fault.c')
-rw-r--r--  arch/m32r/mm/fault.c | 550
1 file changed, 0 insertions, 550 deletions
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
deleted file mode 100644
index 46d9a5ca0e3a..000000000000
--- a/arch/m32r/mm/fault.c
+++ /dev/null
@@ -1,550 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m32r/mm/fault.c
 *
 * Copyright (c) 2001, 2002 Hitoshi Yamamoto, and H. Kondo
 * Copyright (c) 2004 Naoto Sugai, NIIBE Yutaka
 *
 * Some code taken from i386 version.
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>	/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/uaccess.h>

#include <asm/m32r.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif
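/*
 * tlb_entry_i/tlb_entry_d hold the software replacement index into the
 * instruction and data TLBs (one per CPU in the SMP build); update_mmu_cache()
 * below advances them round-robin when no invalid TLB slot is available.
 */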

extern void init_tlb(void);

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION		1
#define ACE_WRITE		2
#define ACE_USERMODE		4
#define ACE_INSTRUCTION		8
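/*
 * For example, error_code == 6 (ACE_WRITE | ACE_USERMODE) is a user-mode
 * write to a not-present page, while error_code == 13
 * (ACE_PROTECTION | ACE_USERMODE | ACE_INSTRUCTION) is a user-mode
 * instruction fetch that hit a protection violation.
 */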

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long page, addr;
	unsigned long flags = 0;
	int fault;
	siginfo_t info;

	/*
	 * If BPSW IE bit enable --> set PSW IE bit
	 */
	if (regs->psw & M32R_PSW_BIE)
		local_irq_enable();

	tsk = current;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & ACE_USERMODE) == 0, and that the fault was not a
	 * protection error (error_code & ACE_PROTECTION) == 0.
	 */
	if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
		goto vmalloc_fault;

	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context or have pagefaults
	 * disabled then we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	if (error_code & ACE_USERMODE)
		flags |= FAULT_FLAG_USER;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & ACE_USERMODE) == 0 &&
		    !search_exception_tables(regs->psw))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (error_code & ACE_USERMODE) {
		/*
		 * accessing the stack below "spu" is always a bug.
		 * The "+ 4" is there due to the push instruction
		 * doing pre-decrement on the stack and that
		 * doesn't show up until later..
		 */
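		/*
		 * For example, with spu == 0x7ffffff0 a fault at
		 * 0x7fffffec (spu - 4) is still accepted as a push in
		 * progress, while a fault at 0x7fffffe8 is rejected.
		 */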
		if (address + 4 < regs->spu)
			goto bad_area;
	}

	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
		default:	/* 3: write, present */
			/* fall through */
		case ACE_WRITE:	/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			flags |= FAULT_FLAG_WRITE;
			break;
		case ACE_PROTECTION:	/* read, present */
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

	/*
	 * For instruction access exception, check if the area is executable
	 */
	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	addr = (address & PAGE_MASK);
	set_thread_fault_code(error_code);
	fault = handle_mm_fault(vma, addr, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	set_thread_fault_code(0);
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & ACE_USERMODE) {
		tsk->thread.address = address;
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n",address);
	printk(KERN_ALERT " printing bpc:\n");
	printk("%08lx\n", regs->bpc);
	page = *(unsigned long *)MPTB;
	page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!(error_code & ACE_USERMODE))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exception or die */
	if (!(error_code & ACE_USERMODE))
		goto no_context;

	tsk->thread.address = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)*(unsigned long *)MPTB;
		pgd = offset + (pgd_t *)pgd;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;

		/*
		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
		 * and redundant with the set_pmd() on non-PAE.
		 */

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		addr = (address & PAGE_MASK);
		set_thread_fault_code(error_code);
		update_mmu_cache(NULL, addr, pte_k);
		set_thread_fault_code(0);
		return;
	}
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK	(NR_TLB_ENTRIES - 1)
#define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
		      pte_t *ptep)
{
	volatile unsigned long *entry1, *entry2;
	unsigned long pte_data, flags;
	unsigned int *entry_dat;
	int inst = get_thread_fault_code() & ACE_INSTRUCTION;
	int i;

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr = (vaddr & PAGE_MASK) | get_asid();

	pte_data = pte_val(*ptep);

#ifdef CONFIG_CHIP_OPSP
	entry1 = (unsigned long *)ITLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry1++ == vaddr) {
			set_tlb_data(entry1, pte_data);
			break;
		}
		entry1++;
	}
	entry2 = (unsigned long *)DTLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry2++ == vaddr) {
			set_tlb_data(entry2, pte_data);
			break;
		}
		entry2++;
	}
#else
	/*
	 * Update TLB entries
	 *  entry1: ITLB entry address
	 *  entry2: DTLB entry address
	 */
	__asm__ __volatile__ (
		"seth %0, #high(%4) \n\t"
		"st %2, @(%5, %0) \n\t"
		"ldi %1, #1 \n\t"
		"st %1, @(%6, %0) \n\t"
		"add3 r4, %0, %7 \n\t"
		".fillinsn \n"
		"1: \n\t"
		"ld %1, @(%6, %0) \n\t"
		"bnez %1, 1b \n\t"
		"ld %0, @r4+ \n\t"
		"ld %1, @r4 \n\t"
		"st %3, @+%0 \n\t"
		"st %3, @+%1 \n\t"
		: "=&r" (entry1), "=&r" (entry2)
		: "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
		  "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
		: "r4", "memory"
	);
#endif

	if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
		goto notfound;

found:
	local_irq_restore(flags);

	return;

	/* Valid entry not found */
notfound:
	/*
	 * Update ITLB or DTLB entry
	 *  entry1: TLB entry address
	 *  entry2: TLB base address
	 */
	if (!inst) {
		entry2 = (unsigned long *)DTLB_BASE;
		entry_dat = &tlb_entry_d;
	} else {
		entry2 = (unsigned long *)ITLB_BASE;
		entry_dat = &tlb_entry_i;
	}
	entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

	for (i = 0 ; i < NR_TLB_ENTRIES ; i++) {
		if (!(entry1[1] & 2))	/* Valid bit check */
			break;

		if (entry1 != entry2)
			entry1 -= 2;
		else
			entry1 += TLB_MASK << 1;
	}

	if (i >= NR_TLB_ENTRIES) {	/* Empty entry not found */
		entry1 = entry2 + (*entry_dat << 1);
		*entry_dat = (*entry_dat + 1) & TLB_MASK;
	}
	*entry1++ = vaddr;	/* Set TLB tag */
	set_tlb_data(entry1, pte_data);

	goto found;
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		page &= PAGE_MASK;
		page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(page);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
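		/*
		 * Heuristic (as implemented below): flushing more than a
		 * quarter of the TLB page by page is presumably more
		 * expensive than dropping the whole context and taking a
		 * fresh ASID.
		 */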
		if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB to flush */
			mm_context(mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid;

			asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;

			start |= asid;
			end |= asid;
			while (start < end) {
				__flush_tlb_page(start);
				start += PAGE_SIZE;
			}
		}
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context TLB's
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
	/* Invalidate all TLB of this process. */
	/* Instead of invalidating each TLB, we get new MMU context. */
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm_context(mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
	tlb_entry_i = 0;
	tlb_entry_d = 0;
	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
	*(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}
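
/*
 * For context (not part of the removed file): a minimal, portable user-space
 * sketch of the receiving end of the SIGSEGV path above. do_page_fault()
 * filled info.si_addr with the faulting address before force_sig_info(), and
 * a process can observe that report roughly like this:
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* si->si_addr carries the faulting address reported by the kernel */
	if (si->si_addr == NULL)
		write(STDERR_FILENO, "SIGSEGV: NULL dereference\n", 26);
	else
		write(STDERR_FILENO, "SIGSEGV: bad address\n", 21);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = segv_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 1;		/* force a user-mode write fault */
	return 0;
}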