author		Paul Mundt <lethal@linux-sh.org>	2012-05-14 02:52:28 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2012-05-14 02:52:28 -0400
commit		c06fd28387a3da2cc4763f7f471f735ccdd61b88 (patch)
tree		e2d95ffa04f7e5b17958831e29935a231e094d09 /arch
parent		28080329ede3e4110bb14306b4529a5b9a2ce163 (diff)
sh64: Migrate to __update_tlb() API.
Now that we have a method for finding out if we're handling an ITLB
fault or not without passing it all the way down the chain, it's
possible to use the __update_tlb() interface in place of a special
__do_tlb_refill().

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
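The "method" referred to above is the per-thread fault code that the
sh fault path records: the fast page fault handler stores
FAULT_CODE_ITLB in thread_info when the fault came from the instruction
TLB, and __update_tlb() reads it back later, so the flag no longer has
to be threaded through every call in between. As a rough, illustrative
sketch (the exact bit layout lives in arch/sh's <asm/thread_info.h> and
may differ):

	/* Sketch: fault code kept in the upper bits of thread_info->flags. */
	static inline void set_thread_fault_code(unsigned int val)
	{
		struct thread_info *ti = current_thread_info();

		ti->flags = (ti->flags & ((1UL << TI_FLAG_FAULT_CODE_SHIFT) - 1)) |
			    ((unsigned long)val << TI_FLAG_FAULT_CODE_SHIFT);
	}

	static inline unsigned int get_thread_fault_code(void)
	{
		return current_thread_info()->flags >> TI_FLAG_FAULT_CODE_SHIFT;
	}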
Diffstat (limited to 'arch')

-rw-r--r--	arch/sh/mm/tlb-sh5.c	40
-rw-r--r--	arch/sh/mm/tlbex_64.c	57
-rw-r--r--	arch/sh/mm/tlbflush_64.c	 4

3 files changed, 49 insertions(+), 52 deletions(-)
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index f27dbe1c1599..3aea25dc431a 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -182,3 +182,43 @@ void tlb_unwire_entry(void)
 
 	local_irq_restore(flags);
 }
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	unsigned long long ptel;
+	unsigned long long pteh=0;
+	struct tlb_info *tlbp;
+	unsigned long long next;
+	unsigned int fault_code = get_thread_fault_code();
+
+	/* Get PTEL first */
+	ptel = pte.pte_low;
+
+	/*
+	 * Set PTEH register
+	 */
+	pteh = neff_sign_extend(address & MMU_VPN_MASK);
+
+	/* Set the ASID. */
+	pteh |= get_asid() << PTEH_ASID_SHIFT;
+	pteh |= PTEH_VALID;
+
+	/* Set PTEL register, set_pte has performed the sign extension */
+	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+
+	if (fault_code & FAULT_CODE_ITLB)
+		tlbp = &cpu_data->itlb;
+	else
+		tlbp = &cpu_data->dtlb;
+
+	next = tlbp->next;
+	__flush_tlb_slot(next);
+	asm volatile ("putcfg %0,1,%2\n\n\t"
+		      "putcfg %0,0,%1\n"
+		      : : "r" (next), "r" (pteh), "r" (ptel) );
+
+	next += TLB_STEP;
+	if (next > tlbp->last)
+		next = tlbp->first;
+	tlbp->next = next;
+}
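The itlb/dtlb bookkeeping walked above is a simple round-robin allocator
over the non-wired TLB slots: 'next' advances by TLB_STEP on every
refill and wraps from 'last' back to 'first', so each refill evicts the
oldest soft entry. A sketch of the structure behind cpu_data->itlb and
cpu_data->dtlb, reduced to the fields the code above touches (the real
sh64 definition carries more):

	struct tlb_info {
		unsigned long long next;	/* slot used by the next refill */
		unsigned long long first;	/* first non-wired slot */
		unsigned long long last;	/* last usable slot */
		/* ... */
	};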
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
index d15b99466508..98b64278f8c7 100644
--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -38,54 +38,15 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <cpu/registers.h>
-
-/* Callable from fault.c, so not static */
-inline void __do_tlb_refill(unsigned long address,
-			    unsigned long long is_text_not_data, pte_t *pte)
-{
-	unsigned long long ptel;
-	unsigned long long pteh=0;
-	struct tlb_info *tlbp;
-	unsigned long long next;
-
-	/* Get PTEL first */
-	ptel = pte_val(*pte);
-
-	/*
-	 * Set PTEH register
-	 */
-	pteh = neff_sign_extend(address & MMU_VPN_MASK);
-
-	/* Set the ASID. */
-	pteh |= get_asid() << PTEH_ASID_SHIFT;
-	pteh |= PTEH_VALID;
-
-	/* Set PTEL register, set_pte has performed the sign extension */
-	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-
-	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
-	next = tlbp->next;
-	__flush_tlb_slot(next);
-	asm volatile ("putcfg %0,1,%2\n\n\t"
-		      "putcfg %0,0,%1\n"
-		      : : "r" (next), "r" (pteh), "r" (ptel) );
-
-	next += TLB_STEP;
-	if (next > tlbp->last) next = tlbp->first;
-	tlbp->next = next;
-
-}
 
 static int handle_vmalloc_fault(struct mm_struct *mm,
 				unsigned long protection_flags,
-				unsigned long long textaccess,
 				unsigned long address)
 {
 	pgd_t *dir;
 	pud_t *pud;
 	pmd_t *pmd;
-	static pte_t *pte;
+	pte_t *pte;
 	pte_t entry;
 
 	dir = pgd_offset_k(address);
@@ -106,14 +67,13 @@ static int handle_vmalloc_fault(struct mm_struct *mm,
 	if ((pte_val(entry) & protection_flags) != protection_flags)
 		return 0;
 
-	__do_tlb_refill(address, textaccess, pte);
+	update_mmu_cache(NULL, address, pte);
 
 	return 1;
 }
 
 static int handle_tlbmiss(struct mm_struct *mm,
 			  unsigned long long protection_flags,
-			  unsigned long long textaccess,
 			  unsigned long address)
 {
 	pgd_t *dir;
@@ -165,7 +125,7 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	if ((pte_val(entry) & protection_flags) != protection_flags)
 		return 0;
 
-	__do_tlb_refill(address, textaccess, pte);
+	update_mmu_cache(NULL, address, pte);
 
 	return 1;
 }
@@ -210,7 +170,6 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
-	unsigned long long textaccess;
 	unsigned long long protection_flags;
 	unsigned long long index;
 	unsigned long long expevt4;
@@ -229,8 +188,11 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 	 * that PRU is set when it needs to be. */
 	index = expevt4 ^ (expevt4 >> 5);
 	index &= 7;
+
 	protection_flags = expevt_lookup_table.protection_flags[index];
-	textaccess = expevt_lookup_table.is_text_access[index];
+
+	if (expevt_lookup_table.is_text_access[index])
+		set_thread_fault_code(FAULT_CODE_ITLB);
 
 	/* SIM
 	 * Note this is now called with interrupts still disabled
@@ -252,11 +214,10 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 		 * Process-contexts can never have this address
 		 * range mapped
 		 */
-		if (handle_vmalloc_fault(mm, protection_flags,
-					 textaccess, address))
+		if (handle_vmalloc_fault(mm, protection_flags, address))
 			return 1;
 	} else if (!in_interrupt() && mm) {
-		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
+		if (handle_tlbmiss(mm, protection_flags, address))
 			return 1;
 	}
 
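The update_mmu_cache(NULL, address, pte) calls that replace the direct
__do_tlb_refill() invocations reach the new __update_tlb() through the
generic arch/sh wrapper, which looks roughly like the sketch below (the
vma argument is never inspected on sh64, which is why passing NULL here
is safe):

	static inline void
	update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
			 pte_t *ptep)
	{
		__update_tlb(vma, address, *ptep);
	}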
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 908167bdfc04..f33fdd2558e8 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -170,7 +170,3 @@ void __flush_tlb_global(void)
 {
 	flush_tlb_all();
 }
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
-}