diff options
Diffstat (limited to 'arch/arc/mm/tlb.c')
-rw-r--r-- | arch/arc/mm/tlb.c | 94 |
1 file changed, 54 insertions, 40 deletions
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 85a8716e6028..f58d5f62bccc 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c | |||
@@ -52,6 +52,7 @@ | |||
52 | */ | 52 | */ |
53 | 53 | ||
54 | #include <linux/module.h> | 54 | #include <linux/module.h> |
55 | #include <linux/bug.h> | ||
55 | #include <asm/arcregs.h> | 56 | #include <asm/arcregs.h> |
56 | #include <asm/setup.h> | 57 | #include <asm/setup.h> |
57 | #include <asm/mmu_context.h> | 58 | #include <asm/mmu_context.h> |
@@ -109,38 +110,41 @@ struct mm_struct *asid_mm_map[NUM_ASID + 1]; | |||
109 | 110 | ||
110 | /* | 111 | /* |
111 | * Utility Routine to erase a J-TLB entry | 112 | * Utility Routine to erase a J-TLB entry |
112 | * The procedure is to look it up in the MMU. If found, ERASE it by | 113 | * Caller needs to setup Index Reg (manually or via getIndex) |
113 | * issuing a TlbWrite CMD with PD0 = PD1 = 0 | ||
114 | */ | 114 | */ |
115 | 115 | static inline void __tlb_entry_erase(void) | |
116 | static void __tlb_entry_erase(void) | ||
117 | { | 116 | { |
118 | write_aux_reg(ARC_REG_TLBPD1, 0); | 117 | write_aux_reg(ARC_REG_TLBPD1, 0); |
119 | write_aux_reg(ARC_REG_TLBPD0, 0); | 118 | write_aux_reg(ARC_REG_TLBPD0, 0); |
120 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); | 119 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); |
121 | } | 120 | } |
122 | 121 | ||
123 | static void tlb_entry_erase(unsigned int vaddr_n_asid) | 122 | static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid) |
124 | { | 123 | { |
125 | unsigned int idx; | 124 | unsigned int idx; |
126 | 125 | ||
127 | /* Locate the TLB entry for this vaddr + ASID */ | ||
128 | write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid); | 126 | write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid); |
127 | |||
129 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe); | 128 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe); |
130 | idx = read_aux_reg(ARC_REG_TLBINDEX); | 129 | idx = read_aux_reg(ARC_REG_TLBINDEX); |
131 | 130 | ||
131 | return idx; | ||
132 | } | ||
133 | |||
134 | static void tlb_entry_erase(unsigned int vaddr_n_asid) | ||
135 | { | ||
136 | unsigned int idx; | ||
137 | |||
138 | /* Locate the TLB entry for this vaddr + ASID */ | ||
139 | idx = tlb_entry_lkup(vaddr_n_asid); | ||
140 | |||
132 | /* No error means entry found, zero it out */ | 141 | /* No error means entry found, zero it out */ |
133 | if (likely(!(idx & TLB_LKUP_ERR))) { | 142 | if (likely(!(idx & TLB_LKUP_ERR))) { |
134 | __tlb_entry_erase(); | 143 | __tlb_entry_erase(); |
135 | } else { /* Some sort of Error */ | 144 | } else { |
136 | |||
137 | /* Duplicate entry error */ | 145 | /* Duplicate entry error */ |
138 | if (idx & 0x1) { | 146 | WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n", |
139 | /* TODO we need to handle this case too */ | 147 | vaddr_n_asid); |
140 | pr_emerg("unhandled Duplicate flush for %x\n", | ||
141 | vaddr_n_asid); | ||
142 | } | ||
143 | /* else entry not found so nothing to do */ | ||
144 | } | 148 | } |
145 | } | 149 | } |
146 | 150 | ||
@@ -159,7 +163,7 @@ static void utlb_invalidate(void) | |||
159 | { | 163 | { |
160 | #if (CONFIG_ARC_MMU_VER >= 2) | 164 | #if (CONFIG_ARC_MMU_VER >= 2) |
161 | 165 | ||
162 | #if (CONFIG_ARC_MMU_VER < 3) | 166 | #if (CONFIG_ARC_MMU_VER == 2) |
163 | /* MMU v2 introduced the uTLB Flush command. | 167 | /* MMU v2 introduced the uTLB Flush command. |
164 | * There was however an obscure hardware bug, where uTLB flush would | 168 | * There was however an obscure hardware bug, where uTLB flush would |
165 | * fail when a prior probe for J-TLB (both totally unrelated) would | 169 | * fail when a prior probe for J-TLB (both totally unrelated) would |
@@ -182,6 +186,36 @@ static void utlb_invalidate(void) | |||
182 | 186 | ||
183 | } | 187 | } |
184 | 188 | ||
189 | static void tlb_entry_insert(unsigned int pd0, unsigned int pd1) | ||
190 | { | ||
191 | unsigned int idx; | ||
192 | |||
193 | /* | ||
194 | * First verify if entry for this vaddr+ASID already exists | ||
195 | * This also sets up PD0 (vaddr, ASID..) for final commit | ||
196 | */ | ||
197 | idx = tlb_entry_lkup(pd0); | ||
198 | |||
199 | /* | ||
200 | * If Not already present get a free slot from MMU. | ||
201 | * Otherwise, Probe would have located the entry and set INDEX Reg | ||
202 | * with existing location. This will cause Write CMD to over-write | ||
203 | * existing entry with new PD0 and PD1 | ||
204 | */ | ||
205 | if (likely(idx & TLB_LKUP_ERR)) | ||
206 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex); | ||
207 | |||
208 | /* setup the other half of TLB entry (pfn, rwx..) */ | ||
209 | write_aux_reg(ARC_REG_TLBPD1, pd1); | ||
210 | |||
211 | /* | ||
212 | * Commit the Entry to MMU | ||
213 | * It doesnt sound safe to use the TLBWriteNI cmd here | ||
214 | * which doesn't flush uTLBs. I'd rather be safe than sorry. | ||
215 | */ | ||
216 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); | ||
217 | } | ||
218 | |||
185 | /* | 219 | /* |
186 | * Un-conditionally (without lookup) erase the entire MMU contents | 220 | * Un-conditionally (without lookup) erase the entire MMU contents |
187 | */ | 221 | */ |
@@ -341,7 +375,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
341 | void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | 375 | void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) |
342 | { | 376 | { |
343 | unsigned long flags; | 377 | unsigned long flags; |
344 | unsigned int idx, asid_or_sasid, rwx; | 378 | unsigned int asid_or_sasid, rwx; |
379 | unsigned long pd0, pd1; | ||
345 | 380 | ||
346 | /* | 381 | /* |
347 | * create_tlb() assumes that current->mm == vma->mm, since | 382 | * create_tlb() assumes that current->mm == vma->mm, since |
@@ -385,8 +420,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | |||
385 | /* ASID for this task */ | 420 | /* ASID for this task */ |
386 | asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff; | 421 | asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff; |
387 | 422 | ||
388 | write_aux_reg(ARC_REG_TLBPD0, address | asid_or_sasid | | 423 | pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0); |
389 | (pte_val(*ptep) & PTE_BITS_IN_PD0)); | ||
390 | 424 | ||
391 | /* | 425 | /* |
392 | * ARC MMU provides fully orthogonal access bits for K/U mode, | 426 | * ARC MMU provides fully orthogonal access bits for K/U mode, |
@@ -402,29 +436,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | |||
402 | else | 436 | else |
403 | rwx |= (rwx << 3); /* r w x => Kr Kw Kx Ur Uw Ux */ | 437 | rwx |= (rwx << 3); /* r w x => Kr Kw Kx Ur Uw Ux */ |
404 | 438 | ||
405 | /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */ | 439 | pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1); |
406 | write_aux_reg(ARC_REG_TLBPD1, | ||
407 | rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1)); | ||
408 | |||
409 | /* First verify if entry for this vaddr+ASID already exists */ | ||
410 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe); | ||
411 | idx = read_aux_reg(ARC_REG_TLBINDEX); | ||
412 | |||
413 | /* | ||
414 | * If Not already present get a free slot from MMU. | ||
415 | * Otherwise, Probe would have located the entry and set INDEX Reg | ||
416 | * with existing location. This will cause Write CMD to over-write | ||
417 | * existing entry with new PD0 and PD1 | ||
418 | */ | ||
419 | if (likely(idx & TLB_LKUP_ERR)) | ||
420 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex); | ||
421 | 440 | ||
422 | /* | 441 | tlb_entry_insert(pd0, pd1); |
423 | * Commit the Entry to MMU | ||
424 | * It doesnt sound safe to use the TLBWriteNI cmd here | ||
425 | * which doesn't flush uTLBs. I'd rather be safe than sorry. | ||
426 | */ | ||
427 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); | ||
428 | 442 | ||
429 | local_irq_restore(flags); | 443 | local_irq_restore(flags); |
430 | } | 444 | } |