author		Paul Mundt <lethal@linux-sh.org>	2012-05-14 02:33:28 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2012-05-14 02:33:28 -0400
commit		28080329ede3e4110bb14306b4529a5b9a2ce163 (patch)
tree		6c4d953750eb8aa99077cb0315af060b4dcffc33 /arch/sh
parent		e45af0e083dfc5d49dbad6965b9eeb3ac0072d82 (diff)
sh: Enable shared page fault handler for _32/_64.
This moves the now-generic _32 page fault handling code to a shared location
and adapts the _64 implementation to make use of it.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/mm/Makefile	4
-rw-r--r--	arch/sh/mm/fault.c (renamed from arch/sh/mm/fault_32.c)	80
-rw-r--r--	arch/sh/mm/tlbex_32.c	78
-rw-r--r--	arch/sh/mm/tlbex_64.c (renamed from arch/sh/mm/fault_64.c)	2
-rw-r--r--	arch/sh/mm/tlbflush_64.c	243
5 files changed, 93 insertions(+), 314 deletions(-)
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 2228c8cee4d6..ba819108631b 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -15,8 +15,8 @@ cacheops-$(CONFIG_CPU_SHX3)	+= cache-shx3.o
 obj-y			+= $(cacheops-y)
 
 mmu-y			:= nommu.o extable_32.o
-mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault_$(BITS).o gup.o \
-			   ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
+mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \
+			   pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
 
 obj-y			+= $(mmu-y)
 
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault.c
index 39e291c6b352..16799f920f90 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault.c
@@ -66,7 +66,7 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 	printk(KERN_ALERT "pgd = %p\n", pgd);
 	pgd += pgd_index(addr);
 	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
-	       sizeof(*pgd) * 2, (u64)pgd_val(*pgd));
+	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));
 
 	do {
 		pud_t *pud;
@@ -83,7 +83,7 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 
 		pud = pud_offset(pgd, addr);
 		if (PTRS_PER_PUD != 1)
-			printk(", *pud=%0*Lx", sizeof(*pud) * 2,
+			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
 			       (u64)pud_val(*pud));
 
 		if (pud_none(*pud))
@@ -96,7 +96,7 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 
 		pmd = pmd_offset(pud, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%0*Lx", sizeof(*pmd) * 2,
+			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
 			       (u64)pmd_val(*pmd));
 
 		if (pmd_none(*pmd))
@@ -112,7 +112,8 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_kernel(pmd, addr);
-		printk(", *pte=%0*Lx", sizeof(*pte) * 2, (u64)pte_val(*pte));
+		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
+		       (u64)pte_val(*pte));
 	} while (0);
 
 	printk("\n");
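The (u32) casts above are not cosmetic: the '*' field width in a printk format
consumes an int from the argument list, while sizeof yields a size_t, so on an
ABI where the two types differ the following u64 would be read from the wrong
varargs slot. A minimal user-space sketch of the same rule (standard C only;
%llx with unsigned long long stands in for the kernel's %Lx with u64):

#include <stdio.h>

int main(void)
{
	unsigned long long val = 0xdeadbeefULL;

	/*
	 * The '*' width pops an int. Handing printf a size_t here is
	 * undefined behaviour whenever the two types differ in size,
	 * so the explicit cast keeps the variadic arguments in step.
	 */
	printf("*pte=%0*llx\n", (int)(sizeof(val) * 2), val);
	return 0;
}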
@@ -354,15 +355,20 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	return 1;
 }
 
-static inline int access_error(int write, struct vm_area_struct *vma)
+static inline int access_error(int error_code, struct vm_area_struct *vma)
 {
-	if (write) {
+	if (error_code & FAULT_CODE_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			return 1;
 		return 0;
 	}
 
+	/* ITLB miss on NX page */
+	if (unlikely((error_code & FAULT_CODE_ITLB) &&
+		     !(vma->vm_flags & VM_EXEC)))
+		return 1;
+
 	/* read, not present: */
 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
 		return 1;
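Passing the raw fault code through, rather than a pre-digested write flag, is
what lets a single access_error() cover data writes, instruction fetches, and
plain reads. A stand-alone model of the same decision table; the FAULT_CODE_*
and VM_* values below are stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Stand-in flag values, for illustration only. */
#define FAULT_CODE_WRITE 0x1	/* fault was a write access */
#define FAULT_CODE_ITLB  0x2	/* fault came from the instruction TLB */

#define VM_READ  0x1
#define VM_WRITE 0x2
#define VM_EXEC  0x4

struct vma { unsigned long vm_flags; };

/* Nonzero means the VMA does not permit this access. */
static int access_error(int error_code, const struct vma *vma)
{
	if (error_code & FAULT_CODE_WRITE)
		return !(vma->vm_flags & VM_WRITE);

	/* ITLB miss on a non-executable mapping */
	if ((error_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC))
		return 1;

	/* read from a mapping that grants no rights at all */
	return !(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE));
}

int main(void)
{
	struct vma text = { VM_READ | VM_EXEC };

	printf("write to .text:  %d\n", access_error(FAULT_CODE_WRITE, &text)); /* 1 */
	printf("ifetch in .text: %d\n", access_error(FAULT_CODE_ITLB, &text));  /* 0 */
	return 0;
}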
@@ -500,65 +506,3 @@ good_area:
 
 	up_read(&mm->mmap_sem);
 }
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes
-handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
-	       unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-	} else {
-		if (unlikely(address >= TASK_SIZE || !current->mm))
-			return 1;
-
-		pgd = pgd_offset(current->mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		return 1;
-	if (unlikely(error_code && !pte_write(entry)))
-		return 1;
-
-	if (error_code)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-	set_pte(pte, entry);
-
-#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
-	/*
-	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
-	 * the case of an initial page write exception, so we need to
-	 * flush it in order to avoid potential TLB entry duplication.
-	 */
-	if (error_code == FAULT_CODE_INITIAL)
-		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_thread_fault_code(error_code);
-	update_mmu_cache(NULL, address, pte);
-
-	return 0;
-}
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
new file mode 100644
index 000000000000..382262dc0c4b
--- /dev/null
+++ b/arch/sh/mm/tlbex_32.c
@@ -0,0 +1,78 @@
+/*
+ * TLB miss handler for SH with an MMU.
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ * Copyright (C) 2003 - 2012  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes
+handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
+	       unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t entry;
+
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX) {
+		pgd = pgd_offset_k(address);
+	} else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}
+
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 1;
+	pte = pte_offset_kernel(pmd, address);
+	entry = *pte;
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		return 1;
+	if (unlikely(error_code && !pte_write(entry)))
+		return 1;
+
+	if (error_code)
+		entry = pte_mkdirty(entry);
+	entry = pte_mkyoung(entry);
+
+	set_pte(pte, entry);
+
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
+	/*
+	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
+	 * the case of an initial page write exception, so we need to
+	 * flush it in order to avoid potential TLB entry duplication.
+	 */
+	if (error_code == FAULT_CODE_INITIAL)
+		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
+	set_thread_fault_code(error_code);
+	update_mmu_cache(NULL, address, pte);
+
+	return 0;
+}
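handle_tlbmiss() is a pure software refill: it walks pgd -> pud -> pmd -> pte,
refuses anything not already present (or not writable, for a write fault), and
leaves the heavyweight work to the shared fault.c path. The walk itself is
repeated index-and-dereference, as in this toy two-level sketch; the layout,
types, and helpers here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define PRESENT 0x1u
#define ENTRIES 1024

/* Toy layout: 10-bit directory index, 10-bit table index, 12-bit offset. */
struct pagetable { uintptr_t pte[ENTRIES]; };
struct pagedir   { struct pagetable *pgd[ENTRIES]; };

/* NULL means "not present": the caller falls back to the slow path. */
static uintptr_t *lookup_pte(struct pagedir *dir, uint32_t addr)
{
	struct pagetable *pt = dir->pgd[(addr >> 22) & (ENTRIES - 1)];
	uintptr_t *pte;

	if (!pt)
		return NULL;
	pte = &pt->pte[(addr >> 12) & (ENTRIES - 1)];
	if (!(*pte & PRESENT))
		return NULL;
	return pte;
}

int main(void)
{
	static struct pagedir dir;	/* zero-initialized: all holes */
	static struct pagetable pt;

	/* Map virtual 0x00401000 to frame 0x1234 and mark it present. */
	dir.pgd[0x00401000u >> 22] = &pt;
	pt.pte[(0x00401000u >> 12) & (ENTRIES - 1)] = (0x1234u << 12) | PRESENT;

	printf("0x00401000: %s\n",
	       lookup_pte(&dir, 0x00401000u) ? "refill" : "slow path");
	printf("0x00402000: %s\n",
	       lookup_pte(&dir, 0x00402000u) ? "refill" : "slow path");
	return 0;
}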
diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/tlbex_64.c
index 33a921962d02..d15b99466508 100644
--- a/arch/sh/mm/fault_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -246,7 +246,7 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 	tsk = current;
 	mm = tsk->mm;
 
-	if ((address >= VMALLOC_START && address < VMALLOC_END)) {
+	if (is_vmalloc_addr((void *)address)) {
 		if (ssr_md)
 			/*
 			 * Process-contexts can never have this address
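The one functional change here swaps an open-coded bounds test for the generic
is_vmalloc_addr() helper, so the check can no longer drift from the arch's
VMALLOC_START/VMALLOC_END definitions. Semantically it is just a half-open
range predicate, as in this stand-alone sketch (the bounds are made up):

#include <stdint.h>
#include <stdio.h>

/* Stand-in bounds; the real ones come from the arch headers. */
#define TOY_VMALLOC_START 0xc0000000u
#define TOY_VMALLOC_END   0xe0000000u

/* Half-open interval: start <= addr < end. */
static int toy_is_vmalloc_addr(uint32_t addr)
{
	return addr >= TOY_VMALLOC_START && addr < TOY_VMALLOC_END;
}

int main(void)
{
	printf("%d\n", toy_is_vmalloc_addr(0xc0001000u)); /* 1: inside */
	printf("%d\n", toy_is_vmalloc_addr(0xe0000000u)); /* 0: end is exclusive */
	return 0;
}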
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 99c5833036be..908167bdfc04 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -28,249 +28,6 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 
-static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
-{
-	pgd_t *dir;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-
-	dir = pgd_offset(mm, address);
-	if (pgd_none(*dir))
-		return NULL;
-
-	pud = pud_offset(dir, address);
-	if (pud_none(*pud))
-		return NULL;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd))
-		return NULL;
-
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry))
-		return NULL;
-
-	return pte;
-}
-
-/*
- * This routine handles page faults.  It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
-			      unsigned long address)
-{
-	struct task_struct *tsk;
-	struct mm_struct *mm;
-	struct vm_area_struct * vma;
-	const struct exception_table_entry *fixup;
-	int write = error_code & FAULT_CODE_WRITE;
-	int textaccess = error_code & FAULT_CODE_ITLB;
-	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (write ? FAULT_FLAG_WRITE : 0));
-	pte_t *pte;
-	int fault;
-
-	/* SIM
-	 * Note this is now called with interrupts still disabled
-	 * This is to cope with being called for a missing IO port
-	 * address with interrupts disabled. This should be fixed as
-	 * soon as we have a better 'fast path' miss handler.
-	 *
-	 * Plus take care how you try and debug this stuff.
-	 * For example, writing debug data to a port which you
-	 * have just faulted on is not going to work.
-	 */
-
-	tsk = current;
-	mm = tsk->mm;
-
-	/* Not an IO address, so reenable interrupts */
-	local_irq_enable();
-
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-	/*
-	 * If we're in an interrupt or have no user
-	 * context, we must not take the fault..
-	 */
-	if (in_atomic() || !mm)
-		goto no_context;
-
-retry:
-	/* TLB misses upon some cache flushes get done under cli() */
-	down_read(&mm->mmap_sem);
-
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-	/*
-	 * Ok, we have a good vm_area for this memory access, so
-	 * we can handle it..
-	 */
-good_area:
-	if (textaccess) {
-		if (!(vma->vm_flags & VM_EXEC))
-			goto bad_area;
-	} else {
-		if (write) {
-			if (!(vma->vm_flags & VM_WRITE))
-				goto bad_area;
-		} else {
-			if (!(vma->vm_flags & VM_READ))
-				goto bad_area;
-		}
-	}
-
-	/*
-	 * If for any reason at all we couldn't handle the fault,
-	 * make sure we exit gracefully rather than endlessly redo
-	 * the fault.
-	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
-
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
-		return;
-
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			goto do_sigbus;
-		BUG();
-	}
-
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-				      regs, address);
-		} else {
-			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-				      regs, address);
-		}
-
-		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-
-			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
-			 * have already released it in __lock_page_or_retry
-			 * in mm/filemap.c.
-			 */
-			goto retry;
-		}
-	}
-
-	/* If we get here, the page fault has been handled.  Do the TLB refill
-	   now from the newly-setup PTE, to avoid having to fault again right
-	   away on the same instruction. */
-	pte = lookup_pte (mm, address);
-	if (!pte) {
-		/* From empirical evidence, we can get here, due to
-		   !pte_present(pte). (e.g. if a swap-in occurs, and the page
-		   is swapped back out again before the process that wanted it
-		   gets rescheduled?) */
-		goto no_pte;
-	}
-
-	__do_tlb_refill(address, textaccess, pte);
-
-no_pte:
-
-	up_read(&mm->mmap_sem);
-	return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
-	up_read(&mm->mmap_sem);
-
-	if (user_mode(regs)) {
-		static int count=0;
-		siginfo_t info;
-		if (count < 4) {
-			/* This is really to help debug faults when starting
-			 * usermode, so only need a few */
-			count++;
-			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
-				address, task_pid_nr(current), current->comm,
-				(unsigned long) regs->pc);
-		}
-		if (is_global_init(tsk)) {
-			panic("INIT had user mode bad_area\n");
-		}
-		tsk->thread.address = address;
-		info.si_signo = SIGSEGV;
-		info.si_errno = 0;
-		info.si_addr = (void *) address;
-		force_sig_info(SIGSEGV, &info, tsk);
-		return;
-	}
-
-no_context:
-	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->pc);
-	if (fixup) {
-		regs->pc = fixup->fixup;
-		return;
-	}
-
-	/*
-	 * Oops. The kernel tried to access some bad page. We'll have to
-	 * terminate things with extreme prejudice.
-	 *
-	 */
-	if (address < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-	else
-		printk(KERN_ALERT "Unable to handle kernel paging request");
-	printk(" at virtual address %08lx\n", address);
-	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
-	die("Oops", regs, error_code);
-	do_exit(SIGKILL);
-
-	/*
-	 * We ran out of memory, or some other thing happened to us that made
-	 * us unable to handle the page fault gracefully.
-	 */
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (!user_mode(regs))
-		goto no_context;
-	pagefault_out_of_memory();
-	return;
-
-do_sigbus:
-	printk("fault:Do sigbus\n");
-	up_read(&mm->mmap_sem);
-
-	/*
-	 * Send a sigbus, regardless of whether we were in kernel
-	 * or user mode.
-	 */
-	tsk->thread.address = address;
-	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!user_mode(regs))
-		goto no_context;
-}
-
 void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long long match, pteh=0, lpage;
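With do_page_fault() deleted from tlbflush_64.c, the _64 build relies on the
shared arch/sh/mm/fault.c, including its one-shot retry protocol: the first
handle_mm_fault() call may return VM_FAULT_RETRY after dropping mmap_sem, at
which point the caller clears FAULT_FLAG_ALLOW_RETRY and repeats the lookup so
a second miss is final. A stand-alone model of that control flow, with
stand-in flag values:

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY 0x1	/* stand-in value */
#define VM_FAULT_RETRY         0x2	/* stand-in value */

/* Pretend the first attempt has to drop the lock and ask for a retry. */
static int fake_handle_mm_fault(int *calls, unsigned int flags)
{
	++*calls;
	if ((flags & FAULT_FLAG_ALLOW_RETRY) && *calls == 1)
		return VM_FAULT_RETRY;
	return 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	int calls = 0;
	int fault;

retry:
	fault = fake_handle_mm_fault(&calls, flags);
	if (fault & VM_FAULT_RETRY) {
		/* Retry at most once: a second miss must be final. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	printf("resolved after %d call(s)\n", calls);	/* prints 2 */
	return 0;
}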