author:    Paul Mundt <lethal@linux-sh.org>  2006-12-24 19:51:47 -0500
committer: Paul Mundt <lethal@linux-sh.org>  2007-02-12 20:54:45 -0500
commit:    aec5e0e1c179fac4bbca4007a3f0d3107275a73c (patch)
tree:      3b251e52a89445a5546f398fb16a002435b6c2b6 /include/asm-sh/mmu_context.h
parent:    506b85f4114b912d2e91fab8da9849289e43857f (diff)
sh: Use a per-cpu ASID cache.
Previously this was implemented using a global cache. Cache this
per-CPU instead, and bump up the number of context IDs to match
NR_CPUS.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh/mmu_context.h')
 include/asm-sh/mmu_context.h | 61
 1 file changed, 36 insertions(+), 25 deletions(-)
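For orientation before the diff: the new accessors index a per-CPU slot in the
mm's context and a per-CPU cache in cpu_data. A minimal sketch of the layout
those accessors imply (the real definitions live in asm-sh/mmu.h and
asm-sh/processor.h, outside this diff, so the field shapes and the
sh_cpuinfo name here are inferred, not quoted):

/* Sketch only: layout inferred from cpu_context()/asid_cache() below. */
typedef struct {
	unsigned long id[NR_CPUS];	/* version (high 24 bits) | ASID (low 8 bits) */
} mm_context_t;

struct sh_cpuinfo {			/* hypothetical name for the cpu_data element type */
	unsigned long asid_cache;	/* last version|ASID handed out on this CPU */
};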
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 46f04e23bd45..342024425b7d 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
  *
  * ASID handling idea taken from MIPS implementation.
  */
@@ -19,11 +19,6 @@
  * (b) ASID (Address Space IDentifier)
  */

-/*
- * Cache of MMU context last used.
- */
-extern unsigned long mmu_context_cache;
-
 #define MMU_CONTEXT_ASID_MASK		0x000000ff
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100
@@ -32,6 +27,11 @@ extern unsigned long mmu_context_cache
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID		0x100

+#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
+#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & \
+				 MMU_CONTEXT_ASID_MASK)
+#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+
 /*
  * Virtual Page Number mask
  */
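A context value packs a 24-bit generation ("version") above the 8-bit hardware
ASID, so cpu_asid() is just a mask of the per-CPU slot. A minimal standalone
sketch of that split (the 0x512 context value is hypothetical; only the masks
come from the patch):

#include <stdio.h>

#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00

int main(void)
{
	unsigned long ctx = 0x00000512;	/* hypothetical per-CPU context value */

	/* prints version=0x500 asid=0x12; the low byte is what
	 * set_asid() would program into the MMU */
	printf("version=%#lx asid=%#lx\n",
	       ctx & MMU_CONTEXT_VERSION_MASK,
	       ctx & MMU_CONTEXT_ASID_MASK);
	return 0;
}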
@@ -41,18 +41,17 @@ extern unsigned long mmu_context_cache
 /*
  * Get MMU context if needed.
  */
-static inline void get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 {
-	unsigned long mc = mmu_context_cache;
+	unsigned long asid = asid_cache(cpu);

 	/* Check if we have old version of context. */
-	if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
 		/* It's up to date, do nothing */
 		return;

 	/* It's old, we need to get new context with new version. */
-	mc = ++mmu_context_cache;
-	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
 		/*
 		 * We exhaust ASID of this version.
 		 * Flush all TLB and start new cycle.
@@ -63,10 +62,11 @@ static inline void get_mmu_context(struct mm_struct *mm)
 		 * Fix version; Note that we avoid version #0
 		 * to distingush NO_CONTEXT.
 		 */
-		if (!mc)
-			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+		if (!asid)
+			asid = MMU_CONTEXT_FIRST_VERSION;
 	}
-	mm->context.id = mc;
+
+	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }

 /*
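The allocation scheme only flushes when the 8-bit ASID space is exhausted:
bumping the counter rolls the low byte to zero, which triggers a full TLB
flush and advances the version. A self-contained userspace sketch of that
logic (next_context() is a renamed standalone copy; uint32_t stands in for
the kernel's 32-bit unsigned long on sh, and flush_tlb_all() is stubbed):

#include <inttypes.h>
#include <stdio.h>

#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100

static void flush_tlb_all(void) { puts("flush_tlb_all()"); }

/* Mirrors the rollover handling in get_mmu_context() above. */
static uint32_t next_context(uint32_t asid)
{
	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
		/* 8-bit ASID space exhausted: flush, start a new version */
		flush_tlb_all();
		if (!asid)	/* 32-bit wrap: skip version #0 (NO_CONTEXT) */
			asid = MMU_CONTEXT_FIRST_VERSION;
	}
	return asid;
}

int main(void)
{
	/* 0x1ff -> 0x200: ASID byte rolls over, TLB flushed, version 0x100 -> 0x200 */
	printf("%#" PRIx32 "\n", next_context(0x1ff));
	/* 0xffffffff wraps to 0 -> forced back to FIRST_VERSION (0x100) */
	printf("%#" PRIx32 "\n", next_context(0xffffffff));
	return 0;
}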
@@ -74,9 +74,13 @@ static inline void get_mmu_context(struct mm_struct *mm)
  * instance.
  */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	mm->context.id = NO_CONTEXT;
+	int i;
+
+	for (i = 0; i < num_online_cpus(); i++)
+		cpu_context(i, mm) = NO_CONTEXT;
+
 	return 0;
 }

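With every per-CPU slot initialized to NO_CONTEXT, the first activation of a
new mm on each CPU fails the version check in get_mmu_context() and allocates
a fresh ASID there. A worked trace (sketch; assumes NO_CONTEXT == 0, which
the "avoid version #0" comment implies, and a hypothetical asid_cache value
of 0x104 on CPU 2):

	cpu_context(2, mm) == 0 (NO_CONTEXT)
	(0 ^ 0x104) & MMU_CONTEXT_VERSION_MASK == 0x100  ->  stale, allocate
	++asid -> 0x105; low byte nonzero, so no TLB flush
	cpu_context(2, mm) = asid_cache(2) = 0x105
	set_asid(cpu_asid(2, mm)) -> hardware ASID 0x05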
@@ -117,10 +121,10 @@ static inline unsigned long get_asid(void)
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
 {
-	get_mmu_context(mm);
-	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
+	get_mmu_context(mm, cpu);
+	set_asid(cpu_asid(cpu, mm));
 }

 /* MMU_TTB is used for optimizing the fault handling. */
@@ -138,10 +142,15 @@ static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
 {
+	unsigned int cpu = smp_processor_id();
+
 	if (likely(prev != next)) {
+		cpu_set(cpu, next->cpu_vm_mask);
 		set_TTB(next->pgd);
-		activate_context(next);
-	}
+		activate_context(next, cpu);
+	} else
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+			activate_context(next, cpu);
 }

 #define deactivate_mm(tsk,mm)		do { } while (0)
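The else-branch covers prev == next on a CPU that has never run this mm: the
address space is already current, but this CPU holds no context for it, so
cpu_test_and_set() records the CPU in cpu_vm_mask and, if the bit was clear,
activates a context. A toy model of that bookkeeping (plain C; cpu_set() and
cpu_test_and_set() are reimplemented on an unsigned long, not the kernel
cpumask API):

#include <stdio.h>

typedef unsigned long cpumask_t;	/* toy stand-in for the kernel type */

static void cpu_set(int cpu, cpumask_t *mask)
{
	*mask |= 1UL << cpu;
}

static int cpu_test_and_set(int cpu, cpumask_t *mask)
{
	int was_set = (*mask >> cpu) & 1;
	*mask |= 1UL << cpu;
	return was_set;
}

int main(void)
{
	cpumask_t vm_mask = 0;

	cpu_set(0, &vm_mask);		/* prev != next path on CPU 0 */

	/* prev == next on CPU 1, first time: bit clear, so activate */
	if (!cpu_test_and_set(1, &vm_mask))
		puts("CPU 1: first run of this mm, activate_context()");

	/* prev == next on CPU 1 again: bit already set, nothing to do */
	if (!cpu_test_and_set(1, &vm_mask))
		puts("not reached");

	return 0;
}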
@@ -159,7 +168,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define destroy_context(mm)		do { } while (0)
 #define set_asid(asid)			do { } while (0)
 #define get_asid()			(0)
-#define activate_context(mm)		do { } while (0)
+#define activate_context(mm,cpu)	do { } while (0)
 #define switch_mm(prev,next,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)		do { } while (0)
 #define activate_mm(prev,next)		do { } while (0)
@@ -174,14 +183,16 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  */
 static inline void enable_mmu(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Enable MMU */
 	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
 	ctrl_barrier();

-	if (mmu_context_cache == NO_CONTEXT)
-		mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
+	if (asid_cache(cpu) == NO_CONTEXT)
+		asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;

-	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
+	set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
 }

 static inline void disable_mmu(void)