path: root/include/asm-xtensa/mmu_context.h
author     Chris Zankel <czankel@tensilica.com>    2006-12-10 05:18:48 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>    2006-12-10 12:55:39 -0500
commit     173d6681380aa1d60dfc35ed7178bd7811ba2784 (patch)
tree       9d6d4d2c6dd791499ebab558647efb67ac88ae3a /include/asm-xtensa/mmu_context.h
parent     fd43fe19b830d6cd0eba08a6c6a5f71a6bd9c1b0 (diff)
[PATCH] xtensa: remove extra header files
The Xtensa port contained many header files that were never needed. This
rather lengthy patch removes all those files. Unfortunately, there were many
dependencies that needed to be updated, so this patch touches quite a few
source files.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-xtensa/mmu_context.h')
-rw-r--r--  include/asm-xtensa/mmu_context.h | 269
1 file changed, 37 insertions(+), 232 deletions(-)
diff --git a/include/asm-xtensa/mmu_context.h b/include/asm-xtensa/mmu_context.h
index af683a74a4ec..f14851f086c3 100644
--- a/include/asm-xtensa/mmu_context.h
+++ b/include/asm-xtensa/mmu_context.h
@@ -16,187 +16,32 @@
 #include <linux/stringify.h>
 
 #include <asm/pgtable.h>
-#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-/*
- * Linux was ported to Xtensa assuming all auto-refill ways in set 0
- * had the same properties (a very likely assumption). Multiple sets
- * of auto-refill ways will still work properly, but not as optimally
- * as the Xtensa designer may have assumed.
- *
- * We make this case a hard #error, killing the kernel build, to alert
- * the developer to this condition (which is more likely an error).
- * You super-duper clever developers can change it to a warning or
- * remove it altogether if you think you know what you're doing. :)
- */
+#define XCHAL_MMU_ASID_BITS	8
 
 #if (XCHAL_HAVE_TLBS != 1)
 # error "Linux must have an MMU!"
 #endif
 
-#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
-# error "MMU must have auto-refill ways"
-#endif
-
-#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
-# error Linux may not use all auto-refill ways as efficiently as you think
-#endif
-
-#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
-# error Only one page size allowed!
-#endif
-
 extern unsigned long asid_cache;
-extern pgd_t *current_pgd;
-
-/*
- * Define the number of entries per auto-refill way in set 0 of both I and D
- * TLBs. We deal only with set 0 here (an assumption further explained in
- * assertions.h). Also, define the total number of ARF entries in both TLBs.
- */
-
-#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
-#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))
-
-#define ITLB_ENTRIES \
-	(ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
-#define DTLB_ENTRIES \
-	(DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))
-
-
-/*
- * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
- * In practice, they are probably equal. This macro simplifies function
- * flush_tlb_range().
- */
-
-#if (DTLB_ENTRIES < ITLB_ENTRIES)
-# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES
-#else
-# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES
-#endif
-
-
-/*
- * asid_cache tracks only the ASID[USER_RING] field of the RASID special
- * register, which is the current user-task asid allocation value.
- * mm->context has the same meaning. When it comes time to write the
- * asid_cache or mm->context values to the RASID special register, we first
- * shift the value left by 8, then insert the value.
- * ASID[0] always contains the kernel's asid value, and we reserve three
- * other asid values that we never assign to user tasks.
- */
-
-#define ASID_INC	0x1
-#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
-
-/*
- * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
- * indicating invalid address space. XCHAL_MMU_ASID_KERNEL is a configurable
- * Xtensa processor constant indicating the kernel address space. They can
- * be arbitrary values.
- *
- * We identify three more unique, reserved ASID values to use in the unused
- * ring positions. No other user process will be assigned these reserved
- * ASID values.
- *
- * For example, given that
- *
- *	XCHAL_MMU_ASID_INVALID == 0
- *	XCHAL_MMU_ASID_KERNEL  == 1
- *
- * the following maze of #if statements would generate
- *
- *	ASID_RESERVED_1        == 2
- *	ASID_RESERVED_2        == 3
- *	ASID_RESERVED_3        == 4
- *	ASID_FIRST_NONRESERVED == 5
- */
-
-#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
-# define ASID_RESERVED_1       ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
-#else
-# define ASID_RESERVED_1       ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
-#endif
-
-#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
-# define ASID_RESERVED_2       ((ASID_RESERVED_1 + 1) & ASID_MASK)
-#else
-# define ASID_RESERVED_2       ((ASID_RESERVED_1 + 2) & ASID_MASK)
-#endif
-
-#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
-# define ASID_RESERVED_3       ((ASID_RESERVED_2 + 1) & ASID_MASK)
-#else
-# define ASID_RESERVED_3       ((ASID_RESERVED_2 + 2) & ASID_MASK)
-#endif
-
-#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
-# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK)
-#else
-# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK)
-#endif
-
-#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
-                            ((ASID_RESERVED_2) << 16) + \
-                            ((ASID_RESERVED_3) <<  8) + \
-                            ((XCHAL_MMU_ASID_KERNEL)) )
-
 
 /*
  * NO_CONTEXT is the invalid ASID value that we don't ever assign to
- * any user or kernel context. NO_CONTEXT is a better mnemonic than
- * XCHAL_MMU_ASID_INVALID, so we use it in code instead.
- */
-
-#define NO_CONTEXT   XCHAL_MMU_ASID_INVALID
-
-#if (KERNEL_RING != 0)
-# error The KERNEL_RING really should be zero.
-#endif
-
-#if (USER_RING >= XCHAL_MMU_RINGS)
-# error USER_RING cannot be greater than the highest numbered ring.
-#endif
-
-#if (USER_RING == KERNEL_RING)
-# error The user and kernel rings really should not be equal.
-#endif
-
-#if (USER_RING == 1)
-#define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
-                         ((ASID_RESERVED_2)   << 16) + \
-                         (((x) & (ASID_MASK)) <<  8) + \
-                         ((XCHAL_MMU_ASID_KERNEL)) )
-
-#elif (USER_RING == 2)
-#define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
-                         (((x) & (ASID_MASK)) << 16) + \
-                         ((ASID_RESERVED_2)   <<  8) + \
-                         ((XCHAL_MMU_ASID_KERNEL)) )
-
-#elif (USER_RING == 3)
-#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
-                         ((ASID_RESERVED_1)   << 16) + \
-                         ((ASID_RESERVED_2)   <<  8) + \
-                         ((XCHAL_MMU_ASID_KERNEL)) )
-
-#else
-#error Goofy value for USER_RING
-
-#endif /* USER_RING == 1 */
-
-
-/*
- * All unused by hardware upper bits will be considered
- * as a software asid extension.
+ * any user or kernel context.
+ *
+ * 0 invalid
+ * 1 kernel
+ * 2 reserved
+ * 3 reserved
+ * 4...255 available
  */
 
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION \
-	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
+#define NO_CONTEXT	0
+#define ASID_USER_FIRST	4
+#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
+#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
 
 static inline void set_rasid_register (unsigned long val)
 {
@@ -207,67 +52,28 @@ static inline void set_rasid_register (unsigned long val)
 static inline unsigned long get_rasid_register (void)
 {
 	unsigned long tmp;
-	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
+	__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
 	return tmp;
 }
 
-
-#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))
-
 static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
+__get_new_mmu_context(struct mm_struct *mm)
 {
 	extern void flush_tlb_all(void);
-	if (! ((asid += ASID_INC) & ASID_MASK) ) {
+	if (! (++asid_cache & ASID_MASK) ) {
 		flush_tlb_all(); /* start new asid cycle */
-		if (!asid)      /* fix version if needed */
-			asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
-		asid += ASID_FIRST_NONRESERVED;
+		asid_cache += ASID_USER_FIRST;
 	}
-	mm->context = asid_cache = asid;
-}
-
-#else
-#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation
-
-/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are
-   really the best, but if you insist... */
-
-static inline int validate_asid (unsigned long asid)
-{
-	switch (asid) {
-	case XCHAL_MMU_ASID_INVALID:
-	case XCHAL_MMU_ASID_KERNEL:
-	case ASID_RESERVED_1:
-	case ASID_RESERVED_2:
-	case ASID_RESERVED_3:
-		return 0; /* can't use these values as ASIDs */
-	}
-	return 1; /* valid */
+	mm->context = asid_cache;
 }
 
 static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
+__load_mmu_context(struct mm_struct *mm)
 {
-	extern void flush_tlb_all(void);
-	while (1) {
-		asid += ASID_INC;
-		if ( ! (asid & ASID_MASK) ) {
-			flush_tlb_all(); /* start new asid cycle */
-			if (!asid)      /* fix version if needed */
-				asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
-			asid += ASID_FIRST_NONRESERVED;
-			break; /* no need to validate here */
-		}
-		if (validate_asid (asid & ASID_MASK))
-			break;
-	}
-	mm->context = asid_cache = asid;
+	set_rasid_register(ASID_INSERT(mm->context));
+	invalidate_page_directory();
 }
 
-#endif
-
-
 /*
  * Initialize the context related info for a new mm_struct
  * instance.
@@ -280,6 +86,20 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	return 0;
 }
 
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void
+activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	/* Unconditionally get a new ASID. */
+
+	__get_new_mmu_context(next);
+	__load_mmu_context(next);
+}
+
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
@@ -287,11 +107,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	/* Check if our ASID is of an older version and thus invalid */
 
-	if ((next->context ^ asid) & ASID_VERSION_MASK)
-		get_new_mmu_context(next, asid);
+	if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
+		__get_new_mmu_context(next);
 
-	set_rasid_register (ASID_INSERT(next->context));
-	invalidate_page_directory();
+	__load_mmu_context(next);
 }
 
 #define deactivate_mm(tsk, mm)	do { } while(0)
@@ -302,20 +121,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  */
 static inline void destroy_context(struct mm_struct *mm)
 {
-	/* Nothing to do. */
-}
-
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-	/* Unconditionally get a new ASID. */
-
-	get_new_mmu_context(next, asid_cache);
-	set_rasid_register (ASID_INSERT(next->context));
 	invalidate_page_directory();
 }
 
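
A note for readers: the new ASID_INSERT() macro packs all four ring ASIDs of
the RASID special register in a single constant-plus-OR step: ring 0 (kernel)
is always ASID 1, ring 1 (user) carries the low 8 bits of mm->context, and
rings 2 and 3 hold the reserved ASIDs 2 and 3. The following standalone
user-space sketch (illustrative only, not part of the patch) mirrors the
macro from the patch to show the resulting byte layout:

#include <stdio.h>

#define XCHAL_MMU_ASID_BITS 8
#define ASID_MASK           ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)      (0x03020001 | (((x) & ASID_MASK) << 8))

int main(void)
{
        unsigned long context = 5;              /* example user ASID */
        unsigned long rasid = ASID_INSERT(context);

        /* Prints RASID = 0x03020501: ring3=3, ring2=2, ring1=5, ring0=1 */
        printf("RASID = 0x%08lx\n", rasid);
        for (int ring = 0; ring < 4; ring++)
                printf("ring %d ASID = %lu\n", ring,
                       (rasid >> (ring * 8)) & ASID_MASK);
        return 0;
}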
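
Similarly, the rewritten __get_new_mmu_context() treats asid_cache as a
single counter: the low XCHAL_MMU_ASID_BITS are the hardware ASID, and the
remaining upper bits act as a software generation number, which is what the
(next->context ^ asid) & ~ASID_MASK test in switch_mm() compares. Below is a
minimal user-space simulation of the rollover (again illustrative, not part
of the patch; flush_tlb_all() is stubbed out and get_new_context() is a
hypothetical stand-in for the kernel function):

#include <stdio.h>

#define ASID_MASK       0xff    /* low 8 bits = hardware ASID */
#define ASID_USER_FIRST 4       /* ASIDs 0-3: invalid/kernel/reserved */

static unsigned long asid_cache = ASID_USER_FIRST;

static unsigned long get_new_context(void)
{
        if (!(++asid_cache & ASID_MASK)) {      /* low byte wrapped to 0 */
                /* the real code calls flush_tlb_all() here */
                asid_cache += ASID_USER_FIRST;  /* skip ASIDs 0-3 */
        }
        return asid_cache;
}

int main(void)
{
        unsigned long ctx = get_new_context(); /* 0x005: gen 0, ASID 5 */

        /* Exhaust the rest of this generation's ASIDs. */
        for (int i = 0; i < 300; i++)
                get_new_context();

        /* Same test switch_mm() uses: generation bits differ -> stale. */
        if ((ctx ^ asid_cache) & ~ASID_MASK)
                printf("ctx 0x%03lx is stale (asid_cache now 0x%03lx)\n",
                       ctx, asid_cache);
        return 0;
}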