author		Paul Mundt <lethal@linux-sh.org>	2012-05-14 04:24:21 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2012-05-14 04:24:21 -0400
commit		392c3822a6fc247c0708c9e52c0818d1fbc9d7d7 (patch)
tree		0a2103e365edce78be338b90caebf4526107d5e6 /arch/sh/mm/tlbex_64.c
parent		2ec08e141f88328e8a4d24590e9a2406633a0898 (diff)
sh64: Tidy up and consolidate the TLB miss fast path.
This unifies the fast-path TLB miss handler, allowing for further cleanup
and eventual utilization of a shared _32/_64 handler.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/tlbex_64.c')
-rw-r--r--	arch/sh/mm/tlbex_64.c	107
1 file changed, 15 insertions(+), 92 deletions(-)
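For orientation before the hunks: the consolidated fast-path walker reads roughly as follows after this change, reassembled from the + lines below. The pmd step and the function tail fall outside the hunks shown; they are filled in here on the assumption that they mirror the pud check and the tail of the deleted handle_vmalloc_fault(), and are flagged as assumptions in the comments.

static int handle_tlbmiss(unsigned long long protection_flags,
			  unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* Pick the page-table root up front: the kernel PGD for vmalloc
	 * addresses, the current task's PGD for everything else. */
	if (is_vmalloc_addr((void *)address)) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	/* From here down the walk is shared by both halves. */
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || !pud_present(*pud))
		return 1;

	/* Assumption: the pmd step is elided between the hunks below and
	 * is taken to mirror the pud check. */
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 1;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return 1;

	/* Assumption: this tail is not shown in the hunks; taken to be
	 * unchanged from the deleted handle_vmalloc_fault() below. */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 1;

	update_mmu_cache(NULL, address, pte);

	return 0;
}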
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
index 59cb058217a0..24dd4ab33dec 100644
--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -33,76 +33,32 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
+#include <linux/kprobes.h>
 #include <asm/tlb.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 
-static int handle_vmalloc_fault(struct mm_struct *mm,
-				unsigned long protection_flags,
-				unsigned long address)
-{
-	pgd_t *dir;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-
-	dir = pgd_offset_k(address);
-
-	pud = pud_offset(dir, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-
-	if (pte_none(entry) || !pte_present(entry))
-		return 1;
-	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 1;
-
-	update_mmu_cache(NULL, address, pte);
-
-	return 0;
-}
-
-static int handle_tlbmiss(struct mm_struct *mm,
-			  unsigned long long protection_flags,
+static int handle_tlbmiss(unsigned long long protection_flags,
 			  unsigned long address)
 {
-	pgd_t *dir;
+	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
 
-	/* NB. The PGD currently only contains a single entry - there is no
-	   page table tree stored for the top half of the address space since
-	   virtual pages in that region should never be mapped in user mode.
-	   (In kernel mode, the only things in that region are the 512Mb super
-	   page (locked in), and vmalloc (modules) + I/O device pages (handled
-	   by handle_vmalloc_fault), so no PGD for the upper half is required
-	   by kernel mode either).
-
-	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
-	   the next test is necessary. - RPC */
-	if (address >= (unsigned long) TASK_SIZE)
-		/* upper half - never has page table entries. */
-		return 1;
+	if (is_vmalloc_addr((void *)address)) {
+		pgd = pgd_offset_k(address);
+	} else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
 
-	dir = pgd_offset(mm, address);
-	if (pgd_none(*dir) || !pgd_present(*dir))
-		return 1;
-	if (!pgd_present(*dir))
-		return 1;
+		pgd = pgd_offset(current->mm, address);
+	}
 
-	pud = pud_offset(dir, address);
+	pud = pud_offset(pgd, address);
 	if (pud_none(*pud) || !pud_present(*pud))
 		return 1;
 
@@ -112,7 +68,6 @@ static int handle_tlbmiss(struct mm_struct *mm,
 
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
-
 	if (pte_none(entry) || !pte_present(entry))
 		return 1;
 
@@ -146,9 +101,6 @@ struct expevt_lookup {
 #define PRX (1<<7)
 #define PRR (1<<6)
 
-#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
-#define YOUNG (_PAGE_ACCESSED)
-
 /* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
    the fault happened in user mode or privileged mode. */
 static struct expevt_lookup expevt_lookup_table = {
@@ -164,12 +116,10 @@ static struct expevt_lookup expevt_lookup_table = {
    general fault handling in fault.c which deals with mapping file-backed
    pages, stack growth, segmentation faults, swapping etc etc)
 */
-asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
-				  unsigned long long expevt,
-				  unsigned long address)
+asmlinkage int __kprobes
+do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
+		   unsigned long address)
 {
-	struct task_struct *tsk;
-	struct mm_struct *mm;
 	unsigned long long protection_flags;
 	unsigned long long index;
 	unsigned long long expevt4;
@@ -194,32 +144,5 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 	if (expevt_lookup_table.is_text_access[index])
 		set_thread_fault_code(FAULT_CODE_ITLB);
 
-	/* SIM
-	 * Note this is now called with interrupts still disabled
-	 * This is to cope with being called for a missing IO port
-	 * address with interrupts disabled. This should be fixed as
-	 * soon as we have a better 'fast path' miss handler.
-	 *
-	 * Plus take care how you try and debug this stuff.
-	 * For example, writing debug data to a port which you
-	 * have just faulted on is not going to work.
-	 */
-
-	tsk = current;
-	mm = tsk->mm;
-
-	if (is_vmalloc_addr((void *)address)) {
-		if (ssr_md)
-			/*
-			 * Process-contexts can never have this address
-			 * range mapped
-			 */
-			if (handle_vmalloc_fault(mm, protection_flags, address) == 0)
-				return 0;
-	} else if (!in_interrupt() && mm) {
-		if (handle_tlbmiss(mm, protection_flags, address) == 0)
-			return 0;
-	}
-
-	return 1;
+	return handle_tlbmiss(protection_flags, address);
 }
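With the vmalloc special case folded into handle_tlbmiss(), the tail of do_fast_page_fault() collapses to a single call. A condensed view of the resulting control flow, assembled from the + lines in the last two hunks; the EXPEVT classification between them is elided here, as in the diff, since this patch leaves it untouched:

asmlinkage int __kprobes
do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
		   unsigned long address)
{
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* ... EXPEVT classification via expevt_lookup_table elided;
	 * it derives protection_flags and index and is not changed
	 * by this patch ... */

	if (expevt_lookup_table.is_text_access[index])
		set_thread_fault_code(FAULT_CODE_ITLB);

	/* Same contract as before: 0 means the TLB was refilled on the
	 * fast path, 1 punts to the general fault handling in fault.c
	 * (file-backed mappings, stack growth, swapping, etc.). */
	return handle_tlbmiss(protection_flags, address);
}

Note that the explicit ssr_md test on the vmalloc path is gone: the unified handler dispatches on is_vmalloc_addr() alone and leaves the user/privileged distinction to the protection-flag check, which the 8-entry expevt_lookup table already encodes (see the comment retained in the third hunk).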