Diffstat (limited to 'arch/cris/arch-v32/mm/tlb.c')
-rw-r--r--  arch/cris/arch-v32/mm/tlb.c  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
index a076ef6e9389..eda5ebcaea54 100644
--- a/arch/cris/arch-v32/mm/tlb.c
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -13,8 +13,8 @@
 #include <asm/arch/hwregs/supp_reg.h>
 
 #define UPDATE_TLB_SEL_IDX(val) \
 do { \
 	unsigned long tlb_sel; \
 	\
 	tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
 	SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel); \
@@ -30,8 +30,8 @@ do { \
  * The TLB can host up to 256 different mm contexts at the same time. The running
  * context is found in the PID register. Each TLB entry contains a page_id that
  * has to match the PID register to give a hit. page_id_map keeps track of which
- * mm is assigned to which page_id, making sure it's known when to invalidate TLB
- * entries.
+ * mm is assigned to which page_id, making sure it's known when to
+ * invalidate TLB entries.
  *
  * The last page_id is never running, it is used as an invalid page_id so that
  * it's possible to make TLB entries that will never match.
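The comment block above sketches the whole bookkeeping scheme: at most 256 page_ids, a page_id_map recording which mm owns which id, and the last id reserved so it can never match the running PID. Below is a minimal userspace sketch of that allocation scheme; the names demo_mm and alloc_page_id are hypothetical stand-ins, not the kernel's actual implementation.

#include <stdio.h>

#define NUM_PAGEID	256
#define INVALID_PAGEID	(NUM_PAGEID - 1)	/* reserved, never assigned to an mm */

struct demo_mm { int dummy; };

/* page_id -> owning mm, NULL while the id is free. */
static struct demo_mm *page_id_map[NUM_PAGEID];

/* Hand out a free page_id, skipping the reserved always-invalid last id. */
static int alloc_page_id(struct demo_mm *mm)
{
	for (int id = 0; id < INVALID_PAGEID; id++) {
		if (!page_id_map[id]) {
			page_id_map[id] = mm;
			return id;
		}
	}
	return -1;	/* all usable ids taken; a real MMU layer would recycle one */
}

int main(void)
{
	struct demo_mm mm;

	printf("mm got page_id %d\n", alloc_page_id(&mm));
	return 0;
}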
@@ -179,29 +179,29 @@ void
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	  struct task_struct *tsk)
 {
-	int cpu = smp_processor_id();
-
-	/* Make sure there is a MMU context. */
-	spin_lock(&mmu_context_lock);
-	get_mmu_context(next);
-	cpu_set(cpu, next->cpu_vm_mask);
-	spin_unlock(&mmu_context_lock);
-
-	/*
-	 * Remember the pgd for the fault handlers. Keep a separate copy of it
-	 * because current and active_mm might be invalid at points where
-	 * there's still a need to derefer the pgd.
-	 */
-	per_cpu(current_pgd, cpu) = next->pgd;
-
-	/* Switch context in the MMU. */
-	if (tsk && task_thread_info(tsk))
-	{
-		SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | task_thread_info(tsk)->tls);
-	}
-	else
-	{
-		SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
-	}
+	if (prev != next) {
+		int cpu = smp_processor_id();
+
+		/* Make sure there is a MMU context. */
+		spin_lock(&mmu_context_lock);
+		get_mmu_context(next);
+		cpu_set(cpu, next->cpu_vm_mask);
+		spin_unlock(&mmu_context_lock);
+
+		/*
+		 * Remember the pgd for the fault handlers. Keep a separate
+		 * copy of it because current and active_mm might be invalid
+		 * at points where there's still a need to dereference the pgd.
+		 */
+		per_cpu(current_pgd, cpu) = next->pgd;
+
+		/* Switch context in the MMU. */
+		if (tsk && task_thread_info(tsk)) {
+			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
+				    task_thread_info(tsk)->tls);
+		} else {
+			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
+		}
+	}
 }
 
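The substantive change in the hunk above is the new if (prev != next) guard: when the scheduler switches between threads that share one address space, switch_mm now returns without taking the context lock or reprogramming the PID register. Below is a standalone sketch of that fast-path pattern, with hypothetical demo_* names standing in for the kernel's types and the CPU's special register.

#include <stdio.h>

struct demo_mm { int page_id; };

static int hw_pid;	/* stands in for the SPEC_REG_PID special register */

static void demo_switch_mm(struct demo_mm *prev, struct demo_mm *next)
{
	if (prev == next)
		return;	/* same address space: leave the MMU context alone */

	/* Different mm: reprogram the "PID register" (costly on real hardware). */
	hw_pid = next->page_id;
}

int main(void)
{
	struct demo_mm a = { .page_id = 1 }, b = { .page_id = 2 };

	demo_switch_mm(&a, &a);	/* thread switch within one mm: no-op */
	demo_switch_mm(&a, &b);	/* process switch: hw_pid is rewritten */
	printf("hw_pid = %d\n", hw_pid);
	return 0;
}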