Diffstat (limited to 'arch/tile/include/asm')
117 files changed, 12062 insertions, 0 deletions
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
new file mode 100644
index 00000000000..3b8f55b82de
--- /dev/null
+++ b/arch/tile/include/asm/Kbuild
@@ -0,0 +1,3 @@
1 | include include/asm-generic/Kbuild.asm | ||
2 | |||
3 | header-y += ucontext.h | ||
diff --git a/arch/tile/include/asm/asm-offsets.h b/arch/tile/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/tile/include/asm/asm-offsets.h
@@ -0,0 +1 @@
#include <generated/asm-offsets.h>
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
new file mode 100644
index 00000000000..b8c49f98a44
--- /dev/null
+++ b/arch/tile/include/asm/atomic.h
@@ -0,0 +1,159 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Atomic primitives. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_H | ||
18 | #define _ASM_TILE_ATOMIC_H | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | ||
21 | |||
22 | #include <linux/compiler.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | #define ATOMIC_INIT(i) { (i) } | ||
26 | |||
27 | /** | ||
28 | * atomic_read - read atomic variable | ||
29 | * @v: pointer of type atomic_t | ||
30 | * | ||
31 | * Atomically reads the value of @v. | ||
32 | */ | ||
33 | static inline int atomic_read(const atomic_t *v) | ||
34 | { | ||
35 | return v->counter; | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * atomic_sub_return - subtract integer and return | ||
40 | * @v: pointer of type atomic_t | ||
41 | * @i: integer value to subtract | ||
42 | * | ||
43 | * Atomically subtracts @i from @v and returns @v - @i | ||
44 | */ | ||
45 | #define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v)) | ||
46 | |||
47 | /** | ||
48 | * atomic_sub - subtract integer from atomic variable | ||
49 | * @i: integer value to subtract | ||
50 | * @v: pointer of type atomic_t | ||
51 | * | ||
52 | * Atomically subtracts @i from @v. | ||
53 | */ | ||
54 | #define atomic_sub(i, v) atomic_add((int)(-(i)), (v)) | ||
55 | |||
56 | /** | ||
57 | * atomic_sub_and_test - subtract value from variable and test result | ||
58 | * @i: integer value to subtract | ||
59 | * @v: pointer of type atomic_t | ||
60 | * | ||
61 | * Atomically subtracts @i from @v and returns true if the result is | ||
62 | * zero, or false for all other cases. | ||
63 | */ | ||
64 | #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) | ||
65 | |||
66 | /** | ||
67 | * atomic_inc_return - increment memory and return | ||
68 | * @v: pointer of type atomic_t | ||
69 | * | ||
70 | * Atomically increments @v by 1 and returns the new value. | ||
71 | */ | ||
72 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
73 | |||
74 | /** | ||
75 | * atomic_dec_return - decrement memory and return | ||
76 | * @v: pointer of type atomic_t | ||
77 | * | ||
78 | * Atomically decrements @v by 1 and returns the new value. | ||
79 | */ | ||
80 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
81 | |||
82 | /** | ||
83 | * atomic_inc - increment atomic variable | ||
84 | * @v: pointer of type atomic_t | ||
85 | * | ||
86 | * Atomically increments @v by 1. | ||
87 | */ | ||
88 | #define atomic_inc(v) atomic_add(1, (v)) | ||
89 | |||
90 | /** | ||
91 | * atomic_dec - decrement atomic variable | ||
92 | * @v: pointer of type atomic_t | ||
93 | * | ||
94 | * Atomically decrements @v by 1. | ||
95 | */ | ||
96 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
97 | |||
98 | /** | ||
99 | * atomic_dec_and_test - decrement and test | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1 and returns true if the result is 0. | ||
103 | */ | ||
104 | #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) | ||
105 | |||
106 | /** | ||
107 | * atomic_inc_and_test - increment and test | ||
108 | * @v: pointer of type atomic_t | ||
109 | * | ||
110 | * Atomically increments @v by 1 and returns true if the result is 0. | ||
111 | */ | ||
112 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
113 | |||
114 | /** | ||
115 | * atomic_add_negative - add and test if negative | ||
116 | * @v: pointer of type atomic_t | ||
117 | * @i: integer value to add | ||
118 | * | ||
119 | * Atomically adds @i to @v and returns true if the result is | ||
120 | * negative, or false when result is greater than or equal to zero. | ||
121 | */ | ||
122 | #define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) | ||
123 | |||
124 | /** | ||
125 | * atomic_inc_not_zero - increment unless the number is zero | ||
126 | * @v: pointer of type atomic_t | ||
127 | * | ||
128 | * Atomically increments @v by 1, so long as @v is non-zero. | ||
129 | * Returns non-zero if @v was non-zero, and zero otherwise. | ||
130 | */ | ||
131 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
132 | |||
133 | |||
134 | /* | ||
135 | * We define xchg() and cmpxchg() in the included headers. | ||
136 | * Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply | ||
137 | * that cmpxchg() is an efficient operation, which is not particularly true. | ||
138 | */ | ||
139 | |||
140 | /* Nonexistent functions intended to cause link errors. */ | ||
141 | extern unsigned long __xchg_called_with_bad_pointer(void); | ||
142 | extern unsigned long __cmpxchg_called_with_bad_pointer(void); | ||
143 | |||
144 | #define tas(ptr) (xchg((ptr), 1)) | ||
145 | |||
146 | #endif /* __ASSEMBLY__ */ | ||
147 | |||
148 | #ifndef __tilegx__ | ||
149 | #include <asm/atomic_32.h> | ||
150 | #else | ||
151 | #include <asm/atomic_64.h> | ||
152 | #endif | ||
153 | |||
154 | /* Provide the appropriate atomic_long_t definitions. */ | ||
155 | #ifndef __ASSEMBLY__ | ||
156 | #include <asm-generic/atomic-long.h> | ||
157 | #endif | ||
158 | |||
159 | #endif /* _ASM_TILE_ATOMIC_H */ | ||
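The operations in this header are thin wrappers over atomic_add(), atomic_add_return() and atomic_add_unless() from the width-specific headers included above. As a quick illustration of how the derived ops compose, here is a minimal, hypothetical refcount sketch (struct my_obj, my_obj_get() and my_obj_put() are illustrative names, not part of this header):

#include <linux/slab.h>
#include <asm/atomic.h>

struct my_obj {                         /* illustrative example structure */
        atomic_t refcount;
};

static int my_obj_get(struct my_obj *obj)
{
        /* Take a reference only while the object is still live (count != 0). */
        return atomic_inc_not_zero(&obj->refcount);
}

static void my_obj_put(struct my_obj *obj)
{
        /* atomic_dec_and_test() returns true when the count reaches zero. */
        if (atomic_dec_and_test(&obj->refcount))
                kfree(obj);             /* last reference dropped */
}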
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
new file mode 100644
index 00000000000..40a5a3a876d
--- /dev/null
+++ b/arch/tile/include/asm/atomic_32.h
@@ -0,0 +1,370 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Do not include directly; use <asm/atomic.h>. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_32_H | ||
18 | #define _ASM_TILE_ATOMIC_32_H | ||
19 | |||
20 | #include <arch/chip.h> | ||
21 | |||
22 | #ifndef __ASSEMBLY__ | ||
23 | |||
24 | /* Tile-specific routines to support <asm/atomic.h>. */ | ||
25 | int _atomic_xchg(atomic_t *v, int n); | ||
26 | int _atomic_xchg_add(atomic_t *v, int i); | ||
27 | int _atomic_xchg_add_unless(atomic_t *v, int a, int u); | ||
28 | int _atomic_cmpxchg(atomic_t *v, int o, int n); | ||
29 | |||
30 | /** | ||
31 | * atomic_xchg - atomically exchange contents of memory with a new value | ||
32 | * @v: pointer of type atomic_t | ||
33 | * @n: integer value to store in memory | ||
34 | * | ||
35 | * Atomically sets @v to @n and returns the old value of @v. | ||
36 | */ | ||
37 | static inline int atomic_xchg(atomic_t *v, int n) | ||
38 | { | ||
39 | smp_mb(); /* barrier for proper semantics */ | ||
40 | return _atomic_xchg(v, n); | ||
41 | } | ||
42 | |||
43 | /** | ||
44 | * atomic_cmpxchg - atomically exchange contents of memory if it matches | ||
45 | * @v: pointer of type atomic_t | ||
46 | * @o: old value that memory should have | ||
47 | * @n: new value to write to memory if it matches | ||
48 | * | ||
49 | * Atomically checks if @v holds @o and replaces it with @n if so. | ||
50 | * Returns the old value at @v. | ||
51 | */ | ||
52 | static inline int atomic_cmpxchg(atomic_t *v, int o, int n) | ||
53 | { | ||
54 | smp_mb(); /* barrier for proper semantics */ | ||
55 | return _atomic_cmpxchg(v, o, n); | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * atomic_add - add integer to atomic variable | ||
60 | * @i: integer value to add | ||
61 | * @v: pointer of type atomic_t | ||
62 | * | ||
63 | * Atomically adds @i to @v. | ||
64 | */ | ||
65 | static inline void atomic_add(int i, atomic_t *v) | ||
66 | { | ||
67 | _atomic_xchg_add(v, i); | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * atomic_add_return - add integer and return | ||
72 | * @v: pointer of type atomic_t | ||
73 | * @i: integer value to add | ||
74 | * | ||
75 | * Atomically adds @i to @v and returns @i + @v | ||
76 | */ | ||
77 | static inline int atomic_add_return(int i, atomic_t *v) | ||
78 | { | ||
79 | smp_mb(); /* barrier for proper semantics */ | ||
80 | return _atomic_xchg_add(v, i) + i; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * atomic_add_unless - add unless the number is already a given value | ||
85 | * @v: pointer of type atomic_t | ||
86 | * @a: the amount to add to v... | ||
87 | * @u: ...unless v is equal to u. | ||
88 | * | ||
89 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
90 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
91 | */ | ||
92 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
93 | { | ||
94 | smp_mb(); /* barrier for proper semantics */ | ||
95 | return _atomic_xchg_add_unless(v, a, u) != u; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_set - set atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * @n: required value | ||
102 | * | ||
103 | * Atomically sets the value of @v to @n. | ||
104 | * | ||
105 | * atomic_set() can't be just a raw store, since it would be lost if it | ||
106 | * fell between the load and store of one of the other atomic ops. | ||
107 | */ | ||
108 | static inline void atomic_set(atomic_t *v, int n) | ||
109 | { | ||
110 | _atomic_xchg(v, n); | ||
111 | } | ||
112 | |||
113 | #define xchg(ptr, x) ((typeof(*(ptr))) \ | ||
114 | ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \ | ||
115 | atomic_xchg((atomic_t *)(ptr), (long)(x)) : \ | ||
116 | __xchg_called_with_bad_pointer())) | ||
117 | |||
118 | #define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \ | ||
119 | ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \ | ||
120 | atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \ | ||
121 | __cmpxchg_called_with_bad_pointer())) | ||
122 | |||
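On tilepro these xchg()/cmpxchg() macros only accept 32-bit objects; any other size falls through to the link-error stubs declared in <asm/atomic.h>. A common pattern layered on cmpxchg() is an open-coded read-modify-write retry loop; a minimal sketch (set_flag_bits() and its arguments are illustrative):

#include <asm/atomic.h>                 /* provides cmpxchg() on tile */

static inline void set_flag_bits(unsigned int *word, unsigned int bits)
{
        unsigned int old, new;

        /* Retry if another cpu updated *word between the load and the
         * cmpxchg(); cmpxchg() returns the value it found in memory. */
        do {
                old = *word;
                new = old | bits;
        } while (cmpxchg(word, old, new) != old);
}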
123 | /* A 64bit atomic type */ | ||
124 | |||
125 | typedef struct { | ||
126 | u64 __aligned(8) counter; | ||
127 | } atomic64_t; | ||
128 | |||
129 | #define ATOMIC64_INIT(val) { (val) } | ||
130 | |||
131 | u64 _atomic64_xchg(atomic64_t *v, u64 n); | ||
132 | u64 _atomic64_xchg_add(atomic64_t *v, u64 i); | ||
133 | u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u); | ||
134 | u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n); | ||
135 | |||
136 | /** | ||
137 | * atomic64_read - read atomic variable | ||
138 | * @v: pointer of type atomic64_t | ||
139 | * | ||
140 | * Atomically reads the value of @v. | ||
141 | */ | ||
142 | static inline u64 atomic64_read(const atomic64_t *v) | ||
143 | { | ||
144 | /* | ||
145 | * Requires an atomic op to read both 32-bit parts consistently. | ||
146 | * Casting away const is safe since the atomic support routines | ||
147 | * do not write to memory if the value has not been modified. | ||
148 | */ | ||
149 | return _atomic64_xchg_add((atomic64_t *)v, 0); | ||
150 | } | ||
151 | |||
152 | /** | ||
153 | * atomic64_xchg - atomically exchange contents of memory with a new value | ||
154 | * @v: pointer of type atomic64_t | ||
155 | * @n: integer value to store in memory | ||
156 | * | ||
157 | * Atomically sets @v to @n and returns the old value of @v. | ||
158 | */ | ||
159 | static inline u64 atomic64_xchg(atomic64_t *v, u64 n) | ||
160 | { | ||
161 | smp_mb(); /* barrier for proper semantics */ | ||
162 | return _atomic64_xchg(v, n); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * atomic64_cmpxchg - atomically exchange contents of memory if it matches | ||
167 | * @v: pointer of type atomic64_t | ||
168 | * @o: old value that memory should have | ||
169 | * @n: new value to write to memory if it matches | ||
170 | * | ||
171 | * Atomically checks if @v holds @o and replaces it with @n if so. | ||
172 | * Returns the old value at @v. | ||
173 | */ | ||
174 | static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) | ||
175 | { | ||
176 | smp_mb(); /* barrier for proper semantics */ | ||
177 | return _atomic64_cmpxchg(v, o, n); | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * atomic64_add - add integer to atomic variable | ||
182 | * @i: integer value to add | ||
183 | * @v: pointer of type atomic64_t | ||
184 | * | ||
185 | * Atomically adds @i to @v. | ||
186 | */ | ||
187 | static inline void atomic64_add(u64 i, atomic64_t *v) | ||
188 | { | ||
189 | _atomic64_xchg_add(v, i); | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * atomic64_add_return - add integer and return | ||
194 | * @v: pointer of type atomic64_t | ||
195 | * @i: integer value to add | ||
196 | * | ||
197 | * Atomically adds @i to @v and returns @i + @v | ||
198 | */ | ||
199 | static inline u64 atomic64_add_return(u64 i, atomic64_t *v) | ||
200 | { | ||
201 | smp_mb(); /* barrier for proper semantics */ | ||
202 | return _atomic64_xchg_add(v, i) + i; | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * atomic64_add_unless - add unless the number is already a given value | ||
207 | * @v: pointer of type atomic64_t | ||
208 | * @a: the amount to add to v... | ||
209 | * @u: ...unless v is equal to u. | ||
210 | * | ||
211 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
212 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
213 | */ | ||
214 | static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) | ||
215 | { | ||
216 | smp_mb(); /* barrier for proper semantics */ | ||
217 | return _atomic64_xchg_add_unless(v, a, u) != u; | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * atomic64_set - set atomic variable | ||
222 | * @v: pointer of type atomic64_t | ||
223 | * @n: required value ||
224 | * | ||
225 | * Atomically sets the value of @v to @n. | ||
226 | * | ||
227 | * atomic64_set() can't be just a raw store, since it would be lost if it | ||
228 | * fell between the load and store of one of the other atomic ops. | ||
229 | */ | ||
230 | static inline void atomic64_set(atomic64_t *v, u64 n) | ||
231 | { | ||
232 | _atomic64_xchg(v, n); | ||
233 | } | ||
234 | |||
235 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | ||
236 | #define atomic64_inc(v) atomic64_add(1LL, (v)) | ||
237 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) | ||
238 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | ||
239 | #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) | ||
240 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | ||
241 | #define atomic64_sub(i, v) atomic64_add(-(i), (v)) | ||
242 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) | ||
243 | #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) | ||
244 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | ||
245 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) | ||
246 | |||
247 | /* | ||
248 | * We need a barrier before modifying the word, since the _atomic_xxx() | ||
249 | * routines just tns the lock and then do a read/modify/write of the word. | ||
250 | * But after the word is updated, the routine issues an "mf" before returning, | ||
251 | * and since it's a function call, we don't even need a compiler barrier. | ||
252 | */ | ||
253 | #define smp_mb__before_atomic_dec() smp_mb() | ||
254 | #define smp_mb__before_atomic_inc() smp_mb() | ||
255 | #define smp_mb__after_atomic_dec() do { } while (0) | ||
256 | #define smp_mb__after_atomic_inc() do { } while (0) | ||
257 | |||
258 | |||
259 | /* | ||
260 | * Support "tns" atomic integers. These are atomic integers that can | ||
261 | * hold any value but "1". They are more efficient than regular atomic | ||
262 | * operations because the "lock" (aka acquire) step is a single "tns" | ||
263 | * in the uncontended case, and the "unlock" (aka release) step is a | ||
264 | * single "store" without an mf. (However, note that on tilepro the | ||
265 | * "tns" will evict the local cache line, so it's not all upside.) | ||
266 | * | ||
267 | * Note that you can ONLY observe the value stored in the pointer | ||
268 | * using these operations; a direct read of the value may confusingly | ||
269 | * return the special value "1". | ||
270 | */ | ||
271 | |||
272 | int __tns_atomic_acquire(atomic_t *); | ||
273 | void __tns_atomic_release(atomic_t *p, int v); | ||
274 | |||
275 | static inline void tns_atomic_set(atomic_t *v, int i) | ||
276 | { | ||
277 | __tns_atomic_acquire(v); | ||
278 | __tns_atomic_release(v, i); | ||
279 | } | ||
280 | |||
281 | static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n) | ||
282 | { | ||
283 | int ret = __tns_atomic_acquire(v); | ||
284 | __tns_atomic_release(v, (ret == o) ? n : ret); | ||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static inline int tns_atomic_xchg(atomic_t *v, int n) | ||
289 | { | ||
290 | int ret = __tns_atomic_acquire(v); | ||
291 | __tns_atomic_release(v, n); | ||
292 | return ret; | ||
293 | } | ||
294 | |||
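Because a plain load can observe the transient lock value "1" while another cpu is in the middle of a tns sequence, every read of such an integer has to go through the accessors above. A minimal sketch of the read and update idioms (the seq variable and function names are illustrative):

#include <asm/atomic.h>

static atomic_t seq = ATOMIC_INIT(0);   /* a "tns" integer: never store 1 */

static int read_seq(void)
{
        /* A cmpxchg with old == new leaves memory unchanged and returns
         * the current value, so it doubles as a safe read. */
        return tns_atomic_cmpxchg(&seq, 0, 0);
}

static void publish_seq(int next)
{
        /* The caller must guarantee next != 1; the value 1 is reserved
         * for the tns lock. */
        tns_atomic_set(&seq, next);
}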
295 | #endif /* !__ASSEMBLY__ */ | ||
296 | |||
297 | /* | ||
298 | * Internal definitions only beyond this point. | ||
299 | */ | ||
300 | |||
301 | #define ATOMIC_LOCKS_FOUND_VIA_TABLE() \ | ||
302 | (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP)) | ||
303 | |||
304 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
305 | |||
306 | /* Number of entries in atomic_lock_ptr[]. */ | ||
307 | #define ATOMIC_HASH_L1_SHIFT 6 | ||
308 | #define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT) | ||
309 | |||
310 | /* Number of locks in each struct pointed to by atomic_lock_ptr[]. */ | ||
311 | #define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2) | ||
312 | #define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT) | ||
313 | |||
314 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
315 | |||
316 | /* | ||
317 | * Number of atomic locks in atomic_locks[]. Must be a power of two. | ||
318 | * There is no reason for more than PAGE_SIZE / 8 entries, since that | ||
319 | * is the maximum number of pointer bits we can use to index this. | ||
320 | * And we cannot have more than PAGE_SIZE / 4, since this has to | ||
321 | * fit on a single page and each entry takes 4 bytes. | ||
322 | */ | ||
323 | #define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3) | ||
324 | #define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT) | ||
325 | |||
326 | #ifndef __ASSEMBLY__ | ||
327 | extern int atomic_locks[]; | ||
328 | #endif | ||
329 | |||
330 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
331 | |||
332 | /* | ||
333 | * All the code that may fault while holding an atomic lock must | ||
334 | * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code | ||
335 | * can correctly release and reacquire the lock. Note that we | ||
336 | * mention the register number in a comment in "lib/atomic_asm.S" to keep | ||
337 | * assembly coders from using this register by mistake, so if it | ||
338 | * is changed here, change that comment as well. | ||
339 | */ | ||
340 | #define ATOMIC_LOCK_REG 20 | ||
341 | #define ATOMIC_LOCK_REG_NAME r20 | ||
342 | |||
343 | #ifndef __ASSEMBLY__ | ||
344 | /* Called from setup to initialize a hash table to point to per_cpu locks. */ | ||
345 | void __init_atomic_per_cpu(void); | ||
346 | |||
347 | #ifdef CONFIG_SMP | ||
348 | /* Support releasing the atomic lock in do_page_fault_ics(). */ | ||
349 | void __atomic_fault_unlock(int *lock_ptr); | ||
350 | #endif | ||
351 | |||
352 | /* Private helper routines in lib/atomic_asm_32.S */ | ||
353 | extern struct __get_user __atomic_cmpxchg(volatile int *p, | ||
354 | int *lock, int o, int n); | ||
355 | extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n); | ||
356 | extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n); | ||
357 | extern struct __get_user __atomic_xchg_add_unless(volatile int *p, | ||
358 | int *lock, int o, int n); | ||
359 | extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); | ||
360 | extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); | ||
361 | extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); | ||
362 | extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n); | ||
363 | extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n); | ||
364 | extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n); | ||
365 | extern u64 __atomic64_xchg_add_unless(volatile u64 *p, | ||
366 | int *lock, u64 o, u64 n); | ||
367 | |||
368 | #endif /* !__ASSEMBLY__ */ | ||
369 | |||
370 | #endif /* _ASM_TILE_ATOMIC_32_H */ | ||
diff --git a/arch/tile/include/asm/auxvec.h b/arch/tile/include/asm/auxvec.h
new file mode 100644
index 00000000000..1d393edb064
--- /dev/null
+++ b/arch/tile/include/asm/auxvec.h
@@ -0,0 +1,20 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_AUXVEC_H | ||
16 | #define _ASM_TILE_AUXVEC_H | ||
17 | |||
18 | /* No extensions to auxvec */ | ||
19 | |||
20 | #endif /* _ASM_TILE_AUXVEC_H */ | ||
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
new file mode 100644
index 00000000000..6970bfcad54
--- /dev/null
+++ b/arch/tile/include/asm/backtrace.h
@@ -0,0 +1,193 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _TILE_BACKTRACE_H | ||
16 | #define _TILE_BACKTRACE_H | ||
17 | |||
18 | |||
19 | |||
20 | #include <linux/types.h> | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #if CHIP_VA_WIDTH() > 32 | ||
25 | typedef unsigned long long VirtualAddress; | ||
26 | #else | ||
27 | typedef unsigned int VirtualAddress; | ||
28 | #endif | ||
29 | |||
30 | |||
31 | /** Reads 'size' bytes from 'address' and writes the data to 'result'. | ||
32 | * Returns true if successful, else false (e.g. memory not readable). | ||
33 | */ | ||
34 | typedef bool (*BacktraceMemoryReader)(void *result, | ||
35 | VirtualAddress address, | ||
36 | unsigned int size, | ||
37 | void *extra); | ||
38 | |||
39 | typedef struct { | ||
40 | /** Current PC. */ | ||
41 | VirtualAddress pc; | ||
42 | |||
43 | /** Current stack pointer value. */ | ||
44 | VirtualAddress sp; | ||
45 | |||
46 | /** Current frame pointer value (i.e. caller's stack pointer) */ | ||
47 | VirtualAddress fp; | ||
48 | |||
49 | /** Internal use only: caller's PC for first frame. */ | ||
50 | VirtualAddress initial_frame_caller_pc; | ||
51 | |||
52 | /** Internal use only: callback to read memory. */ | ||
53 | BacktraceMemoryReader read_memory_func; | ||
54 | |||
55 | /** Internal use only: arbitrary argument to read_memory_func. */ | ||
56 | void *read_memory_func_extra; | ||
57 | |||
58 | } BacktraceIterator; | ||
59 | |||
60 | |||
61 | /** Initializes a backtracer to start from the given location. | ||
62 | * | ||
63 | * If the frame pointer cannot be determined it is set to -1. | ||
64 | * | ||
65 | * @param state The state to be filled in. | ||
66 | * @param read_memory_func A callback that reads memory. If NULL, a default | ||
67 | * value is provided. | ||
68 | * @param read_memory_func_extra An arbitrary argument to read_memory_func. | ||
69 | * @param pc The current PC. | ||
70 | * @param lr The current value of the 'lr' register. | ||
71 | * @param sp The current value of the 'sp' register. | ||
72 | * @param r52 The current value of the 'r52' register. | ||
73 | */ | ||
74 | extern void backtrace_init(BacktraceIterator *state, | ||
75 | BacktraceMemoryReader read_memory_func, | ||
76 | void *read_memory_func_extra, | ||
77 | VirtualAddress pc, VirtualAddress lr, | ||
78 | VirtualAddress sp, VirtualAddress r52); | ||
79 | |||
80 | |||
81 | /** Advances the backtracing state to the calling frame, returning | ||
82 | * true iff successful. | ||
83 | */ | ||
84 | extern bool backtrace_next(BacktraceIterator *state); | ||
85 | |||
86 | |||
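backtrace_init() and backtrace_next() together form a simple frame iterator. A minimal sketch of a walker built on them (dump_frames() and the printk format are illustrative; passing NULL selects the default memory reader, per the backtrace_init() comment above):

#include <linux/kernel.h>
#include <asm/backtrace.h>

static void dump_frames(VirtualAddress pc, VirtualAddress lr,
                        VirtualAddress sp, VirtualAddress r52)
{
        BacktraceIterator it;

        backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
        do {
                printk(KERN_INFO "  pc %#lx sp %#lx fp %#lx\n",
                       (unsigned long)it.pc, (unsigned long)it.sp,
                       (unsigned long)it.fp);
        } while (backtrace_next(&it));  /* false once no caller can be found */
}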
87 | typedef enum { | ||
88 | |||
89 | /* We have no idea what the caller's pc is. */ | ||
90 | PC_LOC_UNKNOWN, | ||
91 | |||
92 | /* The caller's pc is currently in lr. */ | ||
93 | PC_LOC_IN_LR, | ||
94 | |||
95 | /* The caller's pc can be found by dereferencing the caller's sp. */ | ||
96 | PC_LOC_ON_STACK | ||
97 | |||
98 | } CallerPCLocation; | ||
99 | |||
100 | |||
101 | typedef enum { | ||
102 | |||
103 | /* We have no idea what the caller's sp is. */ | ||
104 | SP_LOC_UNKNOWN, | ||
105 | |||
106 | /* The caller's sp is currently in r52. */ | ||
107 | SP_LOC_IN_R52, | ||
108 | |||
109 | /* The caller's sp can be found by adding a certain constant | ||
110 | * to the current value of sp. | ||
111 | */ | ||
112 | SP_LOC_OFFSET | ||
113 | |||
114 | } CallerSPLocation; | ||
115 | |||
116 | |||
117 | /* Bit values ORed into CALLER_* values for info ops. */ | ||
118 | enum { | ||
119 | /* Setting the low bit on any of these values means the info op | ||
120 | * applies only to one bundle ago. | ||
121 | */ | ||
122 | ONE_BUNDLE_AGO_FLAG = 1, | ||
123 | |||
124 | /* Setting this bit on a CALLER_SP_* value means the PC is in LR. | ||
125 | * If not set, PC is on the stack. | ||
126 | */ | ||
127 | PC_IN_LR_FLAG = 2, | ||
128 | |||
129 | /* This many of the low bits of a CALLER_SP_* value are for the | ||
130 | * flag bits above. | ||
131 | */ | ||
132 | NUM_INFO_OP_FLAGS = 2, | ||
133 | |||
134 | /* We cannot have an info op in the memory pipe, so this is the maximum. */ | ||
135 | MAX_INFO_OPS_PER_BUNDLE = 2 | ||
136 | }; | ||
137 | |||
138 | |||
139 | /** Internal constants used to define 'info' operands. */ | ||
140 | enum { | ||
141 | /* 0 and 1 are reserved, as are all negative numbers. */ | ||
142 | |||
143 | CALLER_UNKNOWN_BASE = 2, | ||
144 | |||
145 | CALLER_SP_IN_R52_BASE = 4, | ||
146 | |||
147 | CALLER_SP_OFFSET_BASE = 8 | ||
148 | }; | ||
149 | |||
150 | |||
151 | /** Current backtracer state describing where it thinks the caller is. */ | ||
152 | typedef struct { | ||
153 | /* | ||
154 | * Public fields | ||
155 | */ | ||
156 | |||
157 | /* How do we find the caller's PC? */ | ||
158 | CallerPCLocation pc_location : 8; | ||
159 | |||
160 | /* How do we find the caller's SP? */ | ||
161 | CallerSPLocation sp_location : 8; | ||
162 | |||
163 | /* If sp_location == SP_LOC_OFFSET, then caller_sp == sp + | ||
164 | * loc->sp_offset. Else this field is undefined. | ||
165 | */ | ||
166 | uint16_t sp_offset; | ||
167 | |||
168 | /* Is the most recently visited bundle a terminating bundle? */ | ||
169 | bool at_terminating_bundle; | ||
170 | |||
171 | /* | ||
172 | * Private fields | ||
173 | */ | ||
174 | |||
175 | /* Will the forward scanner see someone clobbering sp | ||
176 | * (i.e. changing it with something other than addi sp, sp, N)? | ||
177 | */ | ||
178 | bool sp_clobber_follows; | ||
179 | |||
180 | /* Operand to next "visible" info op (no more than one bundle past | ||
181 | * the next terminating bundle), or -32768 if none. | ||
182 | */ | ||
183 | int16_t next_info_operand; | ||
184 | |||
185 | /* Is the info op in next_info_operand in the very next bundle? */ | ||
186 | bool is_next_info_operand_adjacent; | ||
187 | |||
188 | } CallerLocation; | ||
189 | |||
190 | |||
191 | |||
192 | |||
193 | #endif /* _TILE_BACKTRACE_H */ | ||
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
new file mode 100644
index 00000000000..84600f3514d
--- /dev/null
+++ b/arch/tile/include/asm/bitops.h
@@ -0,0 +1,126 @@
1 | /* | ||
2 | * Copyright 1992, Linus Torvalds. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_BITOPS_H | ||
17 | #define _ASM_TILE_BITOPS_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | #ifndef _LINUX_BITOPS_H | ||
22 | #error only <linux/bitops.h> can be included directly | ||
23 | #endif | ||
24 | |||
25 | #ifdef __tilegx__ | ||
26 | #include <asm/bitops_64.h> | ||
27 | #else | ||
28 | #include <asm/bitops_32.h> | ||
29 | #endif | ||
30 | |||
31 | /** | ||
32 | * __ffs - find first set bit in word | ||
33 | * @word: The word to search | ||
34 | * | ||
35 | * Undefined if no set bit exists, so code should check against 0 first. | ||
36 | */ | ||
37 | static inline unsigned long __ffs(unsigned long word) | ||
38 | { | ||
39 | return __builtin_ctzl(word); | ||
40 | } | ||
41 | |||
42 | /** | ||
43 | * ffz - find first zero bit in word | ||
44 | * @word: The word to search | ||
45 | * | ||
46 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
47 | */ | ||
48 | static inline unsigned long ffz(unsigned long word) | ||
49 | { | ||
50 | return __builtin_ctzl(~word); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * __fls - find last set bit in word | ||
55 | * @word: The word to search | ||
56 | * | ||
57 | * Undefined if no set bit exists, so code should check against 0 first. | ||
58 | */ | ||
59 | static inline unsigned long __fls(unsigned long word) | ||
60 | { | ||
61 | return (sizeof(word) * 8) - 1 - __builtin_clzl(word); | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * ffs - find first set bit in word | ||
66 | * @x: the word to search | ||
67 | * | ||
68 | * This is defined the same way as the libc and compiler builtin ffs | ||
69 | * routines, and therefore differs in spirit from the other bitops. | ||
70 | * | ||
71 | * ffs(value) returns 0 if value is 0 or the position of the first | ||
72 | * set bit if value is nonzero. The first (least significant) bit | ||
73 | * is at position 1. | ||
74 | */ | ||
75 | static inline int ffs(int x) | ||
76 | { | ||
77 | return __builtin_ffs(x); | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * fls - find last set bit in word | ||
82 | * @x: the word to search | ||
83 | * | ||
84 | * This is defined in a similar way as the libc and compiler builtin | ||
85 | * ffs, but returns the position of the most significant set bit. | ||
86 | * | ||
87 | * fls(value) returns 0 if value is 0 or the position of the last | ||
88 | * set bit if value is nonzero. The last (most significant) bit is | ||
89 | * at position 32. | ||
90 | */ | ||
91 | static inline int fls(int x) | ||
92 | { | ||
93 | return (sizeof(int) * 8) - __builtin_clz(x); | ||
94 | } | ||
95 | |||
96 | static inline int fls64(__u64 w) | ||
97 | { | ||
98 | return (sizeof(__u64) * 8) - __builtin_clzll(w); | ||
99 | } | ||
100 | |||
101 | static inline unsigned int hweight32(unsigned int w) | ||
102 | { | ||
103 | return __builtin_popcount(w); | ||
104 | } | ||
105 | |||
106 | static inline unsigned int hweight16(unsigned int w) | ||
107 | { | ||
108 | return __builtin_popcount(w & 0xffff); | ||
109 | } | ||
110 | |||
111 | static inline unsigned int hweight8(unsigned int w) | ||
112 | { | ||
113 | return __builtin_popcount(w & 0xff); | ||
114 | } | ||
115 | |||
116 | static inline unsigned long hweight64(__u64 w) | ||
117 | { | ||
118 | return __builtin_popcountll(w); | ||
119 | } | ||
120 | |||
121 | #include <asm-generic/bitops/lock.h> | ||
122 | #include <asm-generic/bitops/sched.h> | ||
123 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
124 | #include <asm-generic/bitops/minix.h> | ||
125 | |||
126 | #endif /* _ASM_TILE_BITOPS_H */ | ||
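The mix of 0-based (__ffs(), __fls(), ffz()) and 1-based (ffs(), fls()) conventions above is easy to trip over; the following sketch spells out the expected results for one sample word (the function is illustrative):

#include <linux/kernel.h>
#include <linux/bitops.h>

static void bit_search_examples(void)
{
        unsigned long w = 0x90;         /* bits 4 and 7 set */

        /* 0-based helpers; results are undefined for an argument of 0
         * (or ~0UL for ffz), so callers must check first. */
        WARN_ON(__ffs(w) != 4);         /* lowest set bit */
        WARN_ON(__fls(w) != 7);         /* highest set bit */
        WARN_ON(ffz(w) != 0);           /* lowest clear bit */

        /* 1-based libc-style helpers; both return 0 for a 0 argument. */
        WARN_ON(ffs(0x90) != 5);
        WARN_ON(fls(0x90) != 8);

        /* Population count. */
        WARN_ON(hweight32(0x90) != 2);
}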
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
new file mode 100644
index 00000000000..7a93c001ac1
--- /dev/null
+++ b/arch/tile/include/asm/bitops_32.h
@@ -0,0 +1,132 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITOPS_32_H | ||
16 | #define _ASM_TILE_BITOPS_32_H | ||
17 | |||
18 | #include <linux/compiler.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <asm/system.h> | ||
21 | |||
22 | /* Tile-specific routines to support <asm/bitops.h>. */ | ||
23 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); | ||
24 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask); | ||
25 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask); | ||
26 | |||
27 | /** | ||
28 | * set_bit - Atomically set a bit in memory | ||
29 | * @nr: the bit to set | ||
30 | * @addr: the address to start counting from | ||
31 | * | ||
32 | * This function is atomic and may not be reordered. | ||
33 | * See __set_bit() if you do not require the atomic guarantees. | ||
34 | * Note that @nr may be almost arbitrarily large; this function is not | ||
35 | * restricted to acting on a single-word quantity. | ||
36 | */ | ||
37 | static inline void set_bit(unsigned nr, volatile unsigned long *addr) | ||
38 | { | ||
39 | _atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
40 | } | ||
41 | |||
42 | /** | ||
43 | * clear_bit - Clears a bit in memory | ||
44 | * @nr: Bit to clear | ||
45 | * @addr: Address to start counting from | ||
46 | * | ||
47 | * clear_bit() is atomic and may not be reordered. | ||
48 | * See __clear_bit() if you do not require the atomic guarantees. | ||
49 | * Note that @nr may be almost arbitrarily large; this function is not | ||
50 | * restricted to acting on a single-word quantity. | ||
51 | * | ||
52 | * clear_bit() may not contain a memory barrier, so if it is used for | ||
53 | * locking purposes, you should call smp_mb__before_clear_bit() and/or | ||
54 | * smp_mb__after_clear_bit() to ensure changes are visible on other cpus. | ||
55 | */ | ||
56 | static inline void clear_bit(unsigned nr, volatile unsigned long *addr) | ||
57 | { | ||
58 | _atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * change_bit - Toggle a bit in memory | ||
63 | * @nr: Bit to change | ||
64 | * @addr: Address to start counting from | ||
65 | * | ||
66 | * change_bit() is atomic and may not be reordered. | ||
67 | * See __change_bit() if you do not require the atomic guarantees. | ||
68 | * Note that @nr may be almost arbitrarily large; this function is not | ||
69 | * restricted to acting on a single-word quantity. | ||
70 | */ | ||
71 | static inline void change_bit(unsigned nr, volatile unsigned long *addr) | ||
72 | { | ||
73 | _atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * test_and_set_bit - Set a bit and return its old value | ||
78 | * @nr: Bit to set | ||
79 | * @addr: Address to count from | ||
80 | * | ||
81 | * This operation is atomic and cannot be reordered. | ||
82 | * It also implies a memory barrier. | ||
83 | */ | ||
84 | static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr) | ||
85 | { | ||
86 | unsigned long mask = BIT_MASK(nr); | ||
87 | addr += BIT_WORD(nr); | ||
88 | smp_mb(); /* barrier for proper semantics */ | ||
89 | return (_atomic_or(addr, mask) & mask) != 0; | ||
90 | } | ||
91 | |||
92 | /** | ||
93 | * test_and_clear_bit - Clear a bit and return its old value | ||
94 | * @nr: Bit to clear | ||
95 | * @addr: Address to count from | ||
96 | * | ||
97 | * This operation is atomic and cannot be reordered. | ||
98 | * It also implies a memory barrier. | ||
99 | */ | ||
100 | static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr) | ||
101 | { | ||
102 | unsigned long mask = BIT_MASK(nr); | ||
103 | addr += BIT_WORD(nr); | ||
104 | smp_mb(); /* barrier for proper semantics */ | ||
105 | return (_atomic_andn(addr, mask) & mask) != 0; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * test_and_change_bit - Change a bit and return its old value | ||
110 | * @nr: Bit to change | ||
111 | * @addr: Address to count from | ||
112 | * | ||
113 | * This operation is atomic and cannot be reordered. | ||
114 | * It also implies a memory barrier. | ||
115 | */ | ||
116 | static inline int test_and_change_bit(unsigned nr, | ||
117 | volatile unsigned long *addr) | ||
118 | { | ||
119 | unsigned long mask = BIT_MASK(nr); | ||
120 | addr += BIT_WORD(nr); | ||
121 | smp_mb(); /* barrier for proper semantics */ | ||
122 | return (_atomic_xor(addr, mask) & mask) != 0; | ||
123 | } | ||
124 | |||
125 | /* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */ | ||
126 | #define smp_mb__before_clear_bit() smp_mb() | ||
127 | #define smp_mb__after_clear_bit() do {} while (0) | ||
128 | |||
129 | #include <asm-generic/bitops/non-atomic.h> | ||
130 | #include <asm-generic/bitops/ext2-atomic.h> | ||
131 | |||
132 | #endif /* _ASM_TILE_BITOPS_32_H */ | ||
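A typical consumer of these primitives is a one-bit "busy" flag: test_and_set_bit() to try to claim it, clear_bit() with the barrier above to release it. A minimal sketch (the flag word, bit number and function names are illustrative):

#include <linux/bitops.h>

#define MY_BUSY_BIT     0               /* illustrative bit number */

static unsigned long my_flags;          /* illustrative flag word */

static int my_try_start(void)
{
        /* test_and_set_bit() returns the old bit, so 0 means we won it. */
        return !test_and_set_bit(MY_BUSY_BIT, &my_flags);
}

static void my_finish(void)
{
        /* Order this cpu's prior stores before dropping the flag. */
        smp_mb__before_clear_bit();
        clear_bit(MY_BUSY_BIT, &my_flags);
}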
diff --git a/arch/tile/include/asm/bitsperlong.h b/arch/tile/include/asm/bitsperlong.h
new file mode 100644
index 00000000000..58c771f2af2
--- /dev/null
+++ b/arch/tile/include/asm/bitsperlong.h
@@ -0,0 +1,26 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITSPERLONG_H | ||
16 | #define _ASM_TILE_BITSPERLONG_H | ||
17 | |||
18 | #ifdef __LP64__ | ||
19 | # define __BITS_PER_LONG 64 | ||
20 | #else | ||
21 | # define __BITS_PER_LONG 32 | ||
22 | #endif | ||
23 | |||
24 | #include <asm-generic/bitsperlong.h> | ||
25 | |||
26 | #endif /* _ASM_TILE_BITSPERLONG_H */ | ||
diff --git a/arch/tile/include/asm/bug.h b/arch/tile/include/asm/bug.h
new file mode 100644
index 00000000000..b12fd89e42e
--- /dev/null
+++ b/arch/tile/include/asm/bug.h
@@ -0,0 +1 @@
#include <asm-generic/bug.h>
diff --git a/arch/tile/include/asm/bugs.h b/arch/tile/include/asm/bugs.h
new file mode 100644
index 00000000000..61791e1ad9f
--- /dev/null
+++ b/arch/tile/include/asm/bugs.h
@@ -0,0 +1 @@
#include <asm-generic/bugs.h>
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h
new file mode 100644
index 00000000000..9558416d578
--- /dev/null
+++ b/arch/tile/include/asm/byteorder.h
@@ -0,0 +1 @@
#include <linux/byteorder/little_endian.h>
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
new file mode 100644
index 00000000000..f6101840c9e
--- /dev/null
+++ b/arch/tile/include/asm/cache.h
@@ -0,0 +1,52 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHE_H | ||
16 | #define _ASM_TILE_CACHE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* bytes per L1 data cache line */ | ||
21 | #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() | ||
22 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | ||
23 | |||
24 | /* bytes per L2 cache line */ | ||
25 | #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() | ||
26 | #define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) | ||
27 | #define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES) | ||
28 | |||
29 | /* | ||
30 | * TILE-Gx is fully coherent, so we don't need to define | ||
31 | * ARCH_KMALLOC_MINALIGN. | ||
32 | */ | ||
33 | #ifndef __tilegx__ | ||
34 | #define ARCH_KMALLOC_MINALIGN L2_CACHE_BYTES | ||
35 | #endif | ||
36 | |||
37 | /* use the cache line size for the L2, which is where it counts */ | ||
38 | #define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT | ||
39 | #define SMP_CACHE_BYTES L2_CACHE_BYTES | ||
40 | #define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT | ||
41 | #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES | ||
42 | |||
43 | /* Group together read-mostly things to avoid cache false sharing */ | ||
44 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
45 | |||
46 | /* | ||
47 | * Attribute for data that is kept read/write coherent until the end of | ||
48 | * initialization, then bumped to read/only incoherent for performance. | ||
49 | */ | ||
50 | #define __write_once __attribute__((__section__(".w1data"))) | ||
51 | |||
52 | #endif /* _ASM_TILE_CACHE_H */ | ||
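A short sketch of how these definitions are typically used (the names are illustrative): L2_CACHE_ALIGN() rounds a byte count up to a whole number of L2 lines, and __read_mostly moves rarely-written data into the read-mostly section to avoid false sharing.

#include <linux/cache.h>                /* pulls in <asm/cache.h> */

static unsigned long my_table_size __read_mostly = 1024;   /* rarely written */

static size_t my_buffer_bytes(size_t payload)
{
        /* Round up so the buffer ends on an L2 cache line boundary. */
        return L2_CACHE_ALIGN(payload);
}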
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
new file mode 100644
index 00000000000..c5741da4eea
--- /dev/null
+++ b/arch/tile/include/asm/cacheflush.h
@@ -0,0 +1,140 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHEFLUSH_H | ||
16 | #define _ASM_TILE_CACHEFLUSH_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* Keep includes the same across arches. */ | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/cache.h> | ||
23 | #include <asm/system.h> | ||
24 | #include <arch/icache.h> | ||
25 | |||
26 | /* Caches are physically-indexed and so don't need special treatment */ | ||
27 | #define flush_cache_all() do { } while (0) | ||
28 | #define flush_cache_mm(mm) do { } while (0) | ||
29 | #define flush_cache_dup_mm(mm) do { } while (0) | ||
30 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
31 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
32 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
33 | #define flush_dcache_page(page) do { } while (0) | ||
34 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
35 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
36 | #define flush_cache_vmap(start, end) do { } while (0) | ||
37 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
38 | #define flush_icache_page(vma, pg) do { } while (0) | ||
39 | #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) | ||
40 | |||
41 | /* Flush the icache just on this cpu */ | ||
42 | extern void __flush_icache_range(unsigned long start, unsigned long end); | ||
43 | |||
44 | /* Flush the entire icache on this cpu. */ | ||
45 | #define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE()) | ||
46 | |||
47 | #ifdef CONFIG_SMP | ||
48 | /* | ||
49 | * When the kernel writes to its own text we need to do an SMP | ||
50 | * broadcast to make the L1I coherent everywhere. This includes | ||
51 | * module load and single step. | ||
52 | */ | ||
53 | extern void flush_icache_range(unsigned long start, unsigned long end); | ||
54 | #else | ||
55 | #define flush_icache_range __flush_icache_range | ||
56 | #endif | ||
57 | |||
58 | /* | ||
59 | * An update to an executable user page requires icache flushing. | ||
60 | * We could carefully update only tiles that are running this process, | ||
61 | * and rely on the fact that we flush the icache on every context | ||
62 | * switch to avoid doing extra work here. But for now, I'll be | ||
63 | * conservative and just do a global icache flush. | ||
64 | */ | ||
65 | static inline void copy_to_user_page(struct vm_area_struct *vma, | ||
66 | struct page *page, unsigned long vaddr, | ||
67 | void *dst, void *src, int len) | ||
68 | { | ||
69 | memcpy(dst, src, len); | ||
70 | if (vma->vm_flags & VM_EXEC) { | ||
71 | flush_icache_range((unsigned long) dst, | ||
72 | (unsigned long) dst + len); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
77 | memcpy((dst), (src), (len)) | ||
78 | |||
79 | /* | ||
80 | * Invalidate a VA range; pads to L2 cacheline boundaries. | ||
81 | * | ||
82 | * Note that on TILE64, __inv_buffer() actually flushes modified | ||
83 | * cache lines in addition to invalidating them, i.e., it's the | ||
84 | * same as __finv_buffer(). | ||
85 | */ | ||
86 | static inline void __inv_buffer(void *buffer, size_t size) | ||
87 | { | ||
88 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
89 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
90 | while (next < finish) { | ||
91 | __insn_inv(next); | ||
92 | next += CHIP_INV_STRIDE(); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | /* Flush a VA range; pads to L2 cacheline boundaries. */ | ||
97 | static inline void __flush_buffer(void *buffer, size_t size) | ||
98 | { | ||
99 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
100 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
101 | while (next < finish) { | ||
102 | __insn_flush(next); | ||
103 | next += CHIP_FLUSH_STRIDE(); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | /* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */ | ||
108 | static inline void __finv_buffer(void *buffer, size_t size) | ||
109 | { | ||
110 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
111 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
112 | while (next < finish) { | ||
113 | __insn_finv(next); | ||
114 | next += CHIP_FINV_STRIDE(); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | |||
119 | /* Invalidate a VA range, then memory fence. */ | ||
120 | static inline void inv_buffer(void *buffer, size_t size) | ||
121 | { | ||
122 | __inv_buffer(buffer, size); | ||
123 | mb_incoherent(); | ||
124 | } | ||
125 | |||
126 | /* Flush a VA range, then memory fence. */ | ||
127 | static inline void flush_buffer(void *buffer, size_t size) | ||
128 | { | ||
129 | __flush_buffer(buffer, size); | ||
130 | mb_incoherent(); | ||
131 | } | ||
132 | |||
133 | /* Flush & invalidate a VA range, then memory fence. */ | ||
134 | static inline void finv_buffer(void *buffer, size_t size) | ||
135 | { | ||
136 | __finv_buffer(buffer, size); | ||
137 | mb_incoherent(); | ||
138 | } | ||
139 | |||
140 | #endif /* _ASM_TILE_CACHEFLUSH_H */ | ||
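A typical use of the fenced buffer operations is handing memory back and forth with a non-coherent agent such as a DMA-capable device: flush dirty lines out before the device reads, invalidate before the cpu reads back what the device wrote. A minimal sketch (the device hand-off points are illustrative placeholders):

#include <asm/cacheflush.h>

static void hand_buffer_to_device(void *buf, size_t len)
{
        /* Push dirty lines to memory and fence before notifying the device. */
        flush_buffer(buf, len);
        /* ... illustrative: ring the device doorbell here ... */
}

static void take_buffer_from_device(void *buf, size_t len)
{
        /* Drop stale lines so subsequent loads see the device's writes. */
        inv_buffer(buf, len);
        /* ... illustrative: now safe to read buf ... */
}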
diff --git a/arch/tile/include/asm/checksum.h b/arch/tile/include/asm/checksum.h
new file mode 100644
index 00000000000..a120766c726
--- /dev/null
+++ b/arch/tile/include/asm/checksum.h
@@ -0,0 +1,24 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CHECKSUM_H | ||
16 | #define _ASM_TILE_CHECKSUM_H | ||
17 | |||
18 | #include <asm-generic/checksum.h> | ||
19 | |||
20 | /* Allow us to provide a more optimized do_csum(). */ | ||
21 | __wsum do_csum(const unsigned char *buff, int len); | ||
22 | #define do_csum do_csum | ||
23 | |||
24 | #endif /* _ASM_TILE_CHECKSUM_H */ | ||
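The do_csum define above is the asm-generic hook: when the macro is defined, the generic checksum code uses the architecture's optimized inner loop instead of compiling its own. Callers never invoke do_csum() directly; they go through helpers such as csum_partial(). A minimal sketch, assuming the asm-generic helper signatures (the wrapper function is illustrative):

#include <asm/checksum.h>

/* Illustrative: fold a 32-bit partial checksum of a payload down to the
 * 16-bit ones-complement form used in IP headers. */
static __sum16 my_payload_csum(const void *data, int len)
{
        return csum_fold(csum_partial(data, len, 0));
}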
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
new file mode 100644
index 00000000000..5a34da6cdd7
--- /dev/null
+++ b/arch/tile/include/asm/compat.h
@@ -0,0 +1,257 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_COMPAT_H | ||
16 | #define _ASM_TILE_COMPAT_H | ||
17 | |||
18 | /* | ||
19 | * Architecture specific compatibility types | ||
20 | */ | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/sched.h> | ||
23 | |||
24 | #define COMPAT_USER_HZ 100 | ||
25 | |||
26 | /* "long" and pointer-based types are different. */ | ||
27 | typedef s32 compat_long_t; | ||
28 | typedef u32 compat_ulong_t; | ||
29 | typedef u32 compat_size_t; | ||
30 | typedef s32 compat_ssize_t; | ||
31 | typedef s32 compat_off_t; | ||
32 | typedef s32 compat_time_t; | ||
33 | typedef s32 compat_clock_t; | ||
34 | typedef u32 compat_ino_t; | ||
35 | typedef u32 compat_caddr_t; | ||
36 | typedef u32 compat_uptr_t; | ||
37 | |||
38 | /* Many types are "int" or otherwise the same. */ | ||
39 | typedef __kernel_pid_t compat_pid_t; | ||
40 | typedef __kernel_uid_t __compat_uid_t; | ||
41 | typedef __kernel_gid_t __compat_gid_t; | ||
42 | typedef __kernel_uid32_t __compat_uid32_t; | ||
43 | typedef __kernel_uid32_t __compat_gid32_t; | ||
44 | typedef __kernel_mode_t compat_mode_t; | ||
45 | typedef __kernel_dev_t compat_dev_t; | ||
46 | typedef __kernel_loff_t compat_loff_t; | ||
47 | typedef __kernel_nlink_t compat_nlink_t; | ||
48 | typedef __kernel_ipc_pid_t compat_ipc_pid_t; | ||
49 | typedef __kernel_daddr_t compat_daddr_t; | ||
50 | typedef __kernel_fsid_t compat_fsid_t; | ||
51 | typedef __kernel_timer_t compat_timer_t; | ||
52 | typedef __kernel_key_t compat_key_t; | ||
53 | typedef int compat_int_t; | ||
54 | typedef s64 compat_s64; | ||
55 | typedef uint compat_uint_t; | ||
56 | typedef u64 compat_u64; | ||
57 | |||
58 | /* We use the same register dump format in 32-bit images. */ | ||
59 | typedef unsigned long compat_elf_greg_t; | ||
60 | #define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t)) | ||
61 | typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; | ||
62 | |||
63 | struct compat_timespec { | ||
64 | compat_time_t tv_sec; | ||
65 | s32 tv_nsec; | ||
66 | }; | ||
67 | |||
68 | struct compat_timeval { | ||
69 | compat_time_t tv_sec; | ||
70 | s32 tv_usec; | ||
71 | }; | ||
72 | |||
73 | #define compat_stat stat | ||
74 | #define compat_statfs statfs | ||
75 | |||
76 | struct compat_sysctl { | ||
77 | unsigned int name; | ||
78 | int nlen; | ||
79 | unsigned int oldval; | ||
80 | unsigned int oldlenp; | ||
81 | unsigned int newval; | ||
82 | unsigned int newlen; | ||
83 | unsigned int __unused[4]; | ||
84 | }; | ||
85 | |||
86 | |||
87 | struct compat_flock { | ||
88 | short l_type; | ||
89 | short l_whence; | ||
90 | compat_off_t l_start; | ||
91 | compat_off_t l_len; | ||
92 | compat_pid_t l_pid; | ||
93 | }; | ||
94 | |||
95 | #define F_GETLK64 12 /* using 'struct flock64' */ | ||
96 | #define F_SETLK64 13 | ||
97 | #define F_SETLKW64 14 | ||
98 | |||
99 | struct compat_flock64 { | ||
100 | short l_type; | ||
101 | short l_whence; | ||
102 | compat_loff_t l_start; | ||
103 | compat_loff_t l_len; | ||
104 | compat_pid_t l_pid; | ||
105 | }; | ||
106 | |||
107 | #define COMPAT_RLIM_INFINITY 0xffffffff | ||
108 | |||
109 | #define _COMPAT_NSIG 64 | ||
110 | #define _COMPAT_NSIG_BPW 32 | ||
111 | |||
112 | typedef u32 compat_sigset_word; | ||
113 | |||
114 | #define COMPAT_OFF_T_MAX 0x7fffffff | ||
115 | #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL | ||
116 | |||
117 | struct compat_ipc64_perm { | ||
118 | compat_key_t key; | ||
119 | __compat_uid32_t uid; | ||
120 | __compat_gid32_t gid; | ||
121 | __compat_uid32_t cuid; | ||
122 | __compat_gid32_t cgid; | ||
123 | unsigned short mode; | ||
124 | unsigned short __pad1; | ||
125 | unsigned short seq; | ||
126 | unsigned short __pad2; | ||
127 | compat_ulong_t unused1; | ||
128 | compat_ulong_t unused2; | ||
129 | }; | ||
130 | |||
131 | struct compat_semid64_ds { | ||
132 | struct compat_ipc64_perm sem_perm; | ||
133 | compat_time_t sem_otime; | ||
134 | compat_ulong_t __unused1; | ||
135 | compat_time_t sem_ctime; | ||
136 | compat_ulong_t __unused2; | ||
137 | compat_ulong_t sem_nsems; | ||
138 | compat_ulong_t __unused3; | ||
139 | compat_ulong_t __unused4; | ||
140 | }; | ||
141 | |||
142 | struct compat_msqid64_ds { | ||
143 | struct compat_ipc64_perm msg_perm; | ||
144 | compat_time_t msg_stime; | ||
145 | compat_ulong_t __unused1; | ||
146 | compat_time_t msg_rtime; | ||
147 | compat_ulong_t __unused2; | ||
148 | compat_time_t msg_ctime; | ||
149 | compat_ulong_t __unused3; | ||
150 | compat_ulong_t msg_cbytes; | ||
151 | compat_ulong_t msg_qnum; | ||
152 | compat_ulong_t msg_qbytes; | ||
153 | compat_pid_t msg_lspid; | ||
154 | compat_pid_t msg_lrpid; | ||
155 | compat_ulong_t __unused4; | ||
156 | compat_ulong_t __unused5; | ||
157 | }; | ||
158 | |||
159 | struct compat_shmid64_ds { | ||
160 | struct compat_ipc64_perm shm_perm; | ||
161 | compat_size_t shm_segsz; | ||
162 | compat_time_t shm_atime; | ||
163 | compat_ulong_t __unused1; | ||
164 | compat_time_t shm_dtime; | ||
165 | compat_ulong_t __unused2; | ||
166 | compat_time_t shm_ctime; | ||
167 | compat_ulong_t __unused3; | ||
168 | compat_pid_t shm_cpid; | ||
169 | compat_pid_t shm_lpid; | ||
170 | compat_ulong_t shm_nattch; | ||
171 | compat_ulong_t __unused4; | ||
172 | compat_ulong_t __unused5; | ||
173 | }; | ||
174 | |||
175 | /* | ||
176 | * A pointer passed in from user mode. This should not | ||
177 | * be used for syscall parameters; just declare them | ||
178 | * as pointers because the syscall entry code will have | ||
179 | * appropriately converted them already. | ||
180 | */ | ||
181 | |||
182 | static inline void __user *compat_ptr(compat_uptr_t uptr) | ||
183 | { | ||
184 | return (void __user *)(long)(s32)uptr; | ||
185 | } | ||
186 | |||
187 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
188 | { | ||
189 | return (u32)(unsigned long)uptr; | ||
190 | } | ||
191 | |||
192 | /* Sign-extend when storing a kernel pointer to a user's ptregs. */ | ||
193 | static inline unsigned long ptr_to_compat_reg(void __user *uptr) | ||
194 | { | ||
195 | return (long)(int)(long __force)uptr; | ||
196 | } | ||
197 | |||
198 | static inline void __user *compat_alloc_user_space(long len) | ||
199 | { | ||
200 | struct pt_regs *regs = task_pt_regs(current); | ||
201 | return (void __user *)regs->sp - len; | ||
202 | } | ||
203 | |||
204 | static inline int is_compat_task(void) | ||
205 | { | ||
206 | return current_thread_info()->status & TS_COMPAT; | ||
207 | } | ||
208 | |||
209 | extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka, | ||
210 | siginfo_t *info, sigset_t *set, | ||
211 | struct pt_regs *regs); | ||
212 | |||
213 | /* Compat syscalls. */ | ||
214 | struct compat_sigaction; | ||
215 | struct compat_siginfo; | ||
216 | struct compat_sigaltstack; | ||
217 | long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
218 | compat_uptr_t __user *envp); | ||
219 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, | ||
220 | struct compat_sigaction __user *oact, | ||
221 | size_t sigsetsize); | ||
222 | long compat_sys_rt_sigqueueinfo(int pid, int sig, | ||
223 | struct compat_siginfo __user *uinfo); | ||
224 | long compat_sys_rt_sigreturn(void); | ||
225 | long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | ||
226 | struct compat_sigaltstack __user *uoss_ptr); | ||
227 | long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high); | ||
228 | long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high); | ||
229 | long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
230 | u32 dummy, u32 low, u32 high); | ||
231 | long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, | ||
232 | u32 dummy, u32 low, u32 high); | ||
233 | long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len); | ||
234 | long compat_sys_sync_file_range2(int fd, unsigned int flags, | ||
235 | u32 offset_lo, u32 offset_hi, | ||
236 | u32 nbytes_lo, u32 nbytes_hi); | ||
237 | long compat_sys_fallocate(int fd, int mode, | ||
238 | u32 offset_lo, u32 offset_hi, | ||
239 | u32 len_lo, u32 len_hi); | ||
240 | long compat_sys_sched_rr_get_interval(compat_pid_t pid, | ||
241 | struct compat_timespec __user *interval); | ||
242 | |||
243 | /* Versions of compat functions that differ from generic Linux. */ | ||
244 | struct compat_msgbuf; | ||
245 | long tile_compat_sys_msgsnd(int msqid, | ||
246 | struct compat_msgbuf __user *msgp, | ||
247 | size_t msgsz, int msgflg); | ||
248 | long tile_compat_sys_msgrcv(int msqid, | ||
249 | struct compat_msgbuf __user *msgp, | ||
250 | size_t msgsz, long msgtyp, int msgflg); | ||
251 | long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid, | ||
252 | compat_long_t addr, compat_long_t data); | ||
253 | |||
254 | /* Tilera Linux syscalls that don't have "compat" versions. */ | ||
255 | #define compat_sys_flush_cache sys_flush_cache | ||
256 | |||
257 | #endif /* _ASM_TILE_COMPAT_H */ | ||
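A minimal sketch of how the pointer helpers declared above are meant to be used by a compat syscall handler; the handler name, the loop, and the <linux/uaccess.h>/<linux/errno.h> usage are illustrative assumptions, not part of this header:

    /* Hypothetical compat handler: widen 32-bit user pointers with compat_ptr(). */
    static long example_compat_walk(compat_uptr_t __user *uptrs, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    compat_uptr_t cp;
                    void __user *p;

                    if (get_user(cp, &uptrs[i]))    /* fetch the 32-bit user value */
                            return -EFAULT;
                    p = compat_ptr(cp);             /* sign-extend to a real user pointer */
                    /* ... access *p with copy_from_user()/copy_to_user() as usual ... */
            }
            return 0;
    }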
diff --git a/arch/tile/include/asm/cputime.h b/arch/tile/include/asm/cputime.h new file mode 100644 index 00000000000..6d68ad7e0ea --- /dev/null +++ b/arch/tile/include/asm/cputime.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/cputime.h> | |||
diff --git a/arch/tile/include/asm/current.h b/arch/tile/include/asm/current.h new file mode 100644 index 00000000000..da21acf020d --- /dev/null +++ b/arch/tile/include/asm/current.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CURRENT_H | ||
16 | #define _ASM_TILE_CURRENT_H | ||
17 | |||
18 | #include <linux/thread_info.h> | ||
19 | |||
20 | struct task_struct; | ||
21 | |||
22 | static inline struct task_struct *get_current(void) | ||
23 | { | ||
24 | return current_thread_info()->task; | ||
25 | } | ||
26 | #define current get_current() | ||
27 | |||
28 | /* Return a usable "task_struct" pointer even if the real one is corrupt. */ | ||
29 | struct task_struct *validate_current(void); | ||
30 | |||
31 | #endif /* _ASM_TILE_CURRENT_H */ | ||
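A small sketch of what the get_current()/current definitions above give callers; it assumes <linux/sched.h> for the task_struct fields and <linux/kernel.h> for pr_info(), and the function itself is hypothetical:

    static void example_log_current(void)
    {
            /* 'current' expands to get_current(), i.e. current_thread_info()->task. */
            pr_info("running as pid %d (%s)\n", current->pid, current->comm);
    }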
diff --git a/arch/tile/include/asm/delay.h b/arch/tile/include/asm/delay.h new file mode 100644 index 00000000000..97b0e69e704 --- /dev/null +++ b/arch/tile/include/asm/delay.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DELAY_H | ||
16 | #define _ASM_TILE_DELAY_H | ||
17 | |||
18 | /* Undefined functions to get compile-time errors. */ | ||
19 | extern void __bad_udelay(void); | ||
20 | extern void __bad_ndelay(void); | ||
21 | |||
22 | extern void __udelay(unsigned long usecs); | ||
23 | extern void __ndelay(unsigned long nsecs); | ||
24 | extern void __delay(unsigned long loops); | ||
25 | |||
26 | #define udelay(n) (__builtin_constant_p(n) ? \ | ||
27 | ((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \ | ||
28 | __udelay(n)) | ||
29 | |||
30 | #define ndelay(n) (__builtin_constant_p(n) ? \ | ||
31 | ((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \ | ||
32 | __ndelay(n)) | ||
33 | |||
34 | #endif /* _ASM_TILE_DELAY_H */ | ||
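The macros above turn small compile-time-constant delays into __ndelay() spins and make over-large constants fail at link time, since __bad_udelay()/__bad_ndelay() are deliberately left undefined. A short sketch of typical (hypothetical) driver usage under those limits:

    static void example_poll_delay(void)
    {
            udelay(10);     /* constant <= 20000 us: compiles to __ndelay(10 * 1000) */
            ndelay(500);    /* constant <= 20000 ns: compiles to __ndelay(500) */
            /* udelay(50000) would fail to link via the undefined __bad_udelay(). */
    }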
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h new file mode 100644 index 00000000000..f0a4c256403 --- /dev/null +++ b/arch/tile/include/asm/device.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/device.h> | |||
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h new file mode 100644 index 00000000000..6cd978cefb2 --- /dev/null +++ b/arch/tile/include/asm/div64.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/div64.h> | |||
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h new file mode 100644 index 00000000000..cf466b39aa1 --- /dev/null +++ b/arch/tile/include/asm/dma-mapping.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_MAPPING_H | ||
16 | #define _ASM_TILE_DMA_MAPPING_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | #include <linux/cache.h> | ||
21 | #include <linux/io.h> | ||
22 | |||
23 | /* | ||
24 | * Note that on x86 and powerpc, there is a "struct dma_mapping_ops" | ||
25 | * that is used for all the DMA operations. For now, we don't have an | ||
26 | * equivalent on tile, because we only have a single way of doing DMA. | ||
27 | * (Tilera bug 7994 to use dma_mapping_ops.) | ||
28 | */ | ||
29 | |||
30 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
31 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
32 | |||
33 | extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
34 | enum dma_data_direction); | ||
35 | extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
36 | size_t size, enum dma_data_direction); | ||
37 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
38 | enum dma_data_direction); | ||
39 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
40 | int nhwentries, enum dma_data_direction); | ||
41 | extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
42 | unsigned long offset, size_t size, | ||
43 | enum dma_data_direction); | ||
44 | extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
45 | size_t size, enum dma_data_direction); | ||
46 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
47 | int nelems, enum dma_data_direction); | ||
48 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
49 | int nelems, enum dma_data_direction); | ||
50 | |||
51 | |||
52 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
53 | dma_addr_t *dma_handle, gfp_t flag); | ||
54 | |||
55 | void dma_free_coherent(struct device *dev, size_t size, | ||
56 | void *vaddr, dma_addr_t dma_handle); | ||
57 | |||
58 | extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, | ||
59 | enum dma_data_direction); | ||
60 | extern void dma_sync_single_for_device(struct device *, dma_addr_t, | ||
61 | size_t, enum dma_data_direction); | ||
62 | extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, | ||
63 | unsigned long offset, size_t, | ||
64 | enum dma_data_direction); | ||
65 | extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, | ||
66 | unsigned long offset, size_t, | ||
67 | enum dma_data_direction); | ||
68 | extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction); | ||
69 | |||
70 | static inline int | ||
71 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static inline int | ||
77 | dma_supported(struct device *dev, u64 mask) | ||
78 | { | ||
79 | return 1; | ||
80 | } | ||
81 | |||
82 | static inline int | ||
83 | dma_set_mask(struct device *dev, u64 mask) | ||
84 | { | ||
85 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
86 | return -EIO; | ||
87 | |||
88 | *dev->dma_mask = mask; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline int | ||
94 | dma_get_cache_alignment(void) | ||
95 | { | ||
96 | return L2_CACHE_BYTES; | ||
97 | } | ||
98 | |||
99 | #define dma_is_consistent(d, h) (1) | ||
100 | |||
101 | |||
102 | #endif /* _ASM_TILE_DMA_MAPPING_H */ | ||
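A sketch of how a driver might use the streaming-DMA entry points declared above; the device, buffer, and error handling are illustrative, and DMA_BIT_MASK()/DMA_TO_DEVICE are assumed to come from the usual <linux/dma-mapping.h> definitions:

    static int example_dma_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                    return -EIO;

            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -EIO;

            /* ... hand 'handle' to the hardware and wait for completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }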
diff --git a/arch/tile/include/asm/dma.h b/arch/tile/include/asm/dma.h new file mode 100644 index 00000000000..12a7ca16d16 --- /dev/null +++ b/arch/tile/include/asm/dma.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_H | ||
16 | #define _ASM_TILE_DMA_H | ||
17 | |||
18 | #include <asm-generic/dma.h> | ||
19 | |||
20 | /* Needed by drivers/pci/quirks.c */ | ||
21 | #ifdef CONFIG_PCI | ||
22 | extern int isa_dma_bridge_buggy; | ||
23 | #endif | ||
24 | |||
25 | #endif /* _ASM_TILE_DMA_H */ | ||
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h new file mode 100644 index 00000000000..623a6bb741c --- /dev/null +++ b/arch/tile/include/asm/elf.h | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_ELF_H | ||
16 | #define _ASM_TILE_ELF_H | ||
17 | |||
18 | /* | ||
19 | * ELF register definitions. | ||
20 | */ | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #include <linux/ptrace.h> | ||
25 | #include <asm/byteorder.h> | ||
26 | #include <asm/page.h> | ||
27 | |||
28 | typedef unsigned long elf_greg_t; | ||
29 | |||
30 | #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) | ||
31 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
32 | |||
33 | #define EM_TILE64 187 | ||
34 | #define EM_TILEPRO 188 | ||
35 | #define EM_TILEGX 191 | ||
36 | |||
37 | /* Provide a nominal data structure. */ | ||
38 | #define ELF_NFPREG 0 | ||
39 | typedef double elf_fpreg_t; | ||
40 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
41 | |||
42 | #ifdef __tilegx__ | ||
43 | #define ELF_CLASS ELFCLASS64 | ||
44 | #else | ||
45 | #define ELF_CLASS ELFCLASS32 | ||
46 | #endif | ||
47 | #define ELF_DATA ELFDATA2LSB | ||
48 | |||
49 | /* | ||
50 | * There seems to be a bug in how compat_binfmt_elf.c works: it | ||
51 | * #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info(). | ||
52 | * Hack around this by providing an enum value of ELF_ARCH. | ||
53 | */ | ||
54 | enum { ELF_ARCH = CHIP_ELF_TYPE() }; | ||
55 | #define ELF_ARCH ELF_ARCH | ||
56 | |||
57 | /* | ||
58 | * This is used to ensure we don't load something for the wrong architecture. | ||
59 | */ | ||
60 | #define elf_check_arch(x) \ | ||
61 | ((x)->e_ident[EI_CLASS] == ELF_CLASS && \ | ||
62 | (x)->e_machine == CHIP_ELF_TYPE()) | ||
63 | |||
64 | /* The module loader only handles a few relocation types. */ | ||
65 | #ifndef __tilegx__ | ||
66 | #define R_TILE_32 1 | ||
67 | #define R_TILE_JOFFLONG_X1 15 | ||
68 | #define R_TILE_IMM16_X0_LO 25 | ||
69 | #define R_TILE_IMM16_X1_LO 26 | ||
70 | #define R_TILE_IMM16_X0_HA 29 | ||
71 | #define R_TILE_IMM16_X1_HA 30 | ||
72 | #else | ||
73 | #define R_TILEGX_64 1 | ||
74 | #define R_TILEGX_JUMPOFF_X1 21 | ||
75 | #define R_TILEGX_IMM16_X0_HW0 36 | ||
76 | #define R_TILEGX_IMM16_X1_HW0 37 | ||
77 | #define R_TILEGX_IMM16_X0_HW1 38 | ||
78 | #define R_TILEGX_IMM16_X1_HW1 39 | ||
79 | #define R_TILEGX_IMM16_X0_HW2_LAST 48 | ||
80 | #define R_TILEGX_IMM16_X1_HW2_LAST 49 | ||
81 | #endif | ||
82 | |||
83 | /* Use standard page size for core dumps. */ | ||
84 | #define ELF_EXEC_PAGESIZE PAGE_SIZE | ||
85 | |||
86 | /* | ||
87 | * This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
88 | * use of this is to invoke "./ld.so someprog" to test out a new version of | ||
89 | * the loader. We need to make sure that it is out of the way of the program | ||
90 | * that it will "exec", and that there is sufficient room for the brk. | ||
91 | */ | ||
92 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | ||
93 | |||
94 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ | ||
95 | memcpy((char *) &_dest, (char *) _regs, \ | ||
96 | sizeof(struct pt_regs)); | ||
97 | |||
98 | /* No additional FP registers to copy. */ | ||
99 | #define ELF_CORE_COPY_FPREGS(t, fpu) 0 | ||
100 | |||
101 | /* | ||
102 | * This yields a mask that user programs can use to figure out what | ||
103 | * instruction set this CPU supports. This could be done in user space, | ||
104 | * but it's not easy, and we've already done it here. | ||
105 | */ | ||
106 | #define ELF_HWCAP (0) | ||
107 | |||
108 | /* | ||
109 | * This yields a string that ld.so will use to load implementation | ||
110 | * specific libraries for optimization. This is more specific in | ||
111 | * intent than poking at uname or /proc/cpuinfo. | ||
112 | */ | ||
113 | #define ELF_PLATFORM (NULL) | ||
114 | |||
115 | extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr); | ||
116 | |||
117 | #define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr) | ||
118 | |||
119 | extern int dump_task_regs(struct task_struct *, elf_gregset_t *); | ||
120 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | ||
121 | |||
122 | /* Tilera Linux has no personalities currently, so no need to do anything. */ | ||
123 | #define SET_PERSONALITY(ex) do { } while (0) | ||
124 | |||
125 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
126 | /* Support auto-mapping of the user interrupt vectors. */ | ||
127 | struct linux_binprm; | ||
128 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
129 | int executable_stack); | ||
130 | #ifdef CONFIG_COMPAT | ||
131 | |||
132 | #define COMPAT_ELF_PLATFORM "tilegx-m32" | ||
133 | |||
134 | /* | ||
135 | * "Compat" binaries have the same machine type, but 32-bit class, | ||
136 | * since they're not a separate machine type, but just a 32-bit | ||
137 | * variant of the standard 64-bit architecture. | ||
138 | */ | ||
139 | #define compat_elf_check_arch(x) \ | ||
140 | ((x)->e_ident[EI_CLASS] == ELFCLASS32 && \ | ||
141 | (x)->e_machine == CHIP_ELF_TYPE()) | ||
142 | |||
143 | #define compat_start_thread(regs, ip, usp) do { \ | ||
144 | regs->pc = ptr_to_compat_reg((void *)(ip)); \ | ||
145 | regs->sp = ptr_to_compat_reg((void *)(usp)); \ | ||
146 | } while (0) | ||
147 | |||
148 | /* | ||
149 | * Use SET_PERSONALITY to indicate compatibility via TS_COMPAT. | ||
150 | */ | ||
151 | #undef SET_PERSONALITY | ||
152 | #define SET_PERSONALITY(ex) \ | ||
153 | do { \ | ||
154 | current->personality = PER_LINUX; \ | ||
155 | current_thread_info()->status &= ~TS_COMPAT; \ | ||
156 | } while (0) | ||
157 | #define COMPAT_SET_PERSONALITY(ex) \ | ||
158 | do { \ | ||
159 | current->personality = PER_LINUX_32BIT; \ | ||
160 | current_thread_info()->status |= TS_COMPAT; \ | ||
161 | } while (0) | ||
162 | |||
163 | #define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2) | ||
164 | |||
165 | #endif /* CONFIG_COMPAT */ | ||
166 | |||
167 | #endif /* _ASM_TILE_ELF_H */ | ||
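The header defines three Tile machine numbers, but elf_check_arch() only accepts the one matching CHIP_ELF_TYPE() for the running chip, with compat_elf_check_arch() additionally accepting ELFCLASS32 images of the same machine. A tiny hypothetical helper mapping those constants to names, purely as an illustration:

    static const char *example_tile_machine_name(unsigned int e_machine)
    {
            switch (e_machine) {
            case EM_TILE64:   return "tile64";
            case EM_TILEPRO:  return "tilepro";
            case EM_TILEGX:   return "tilegx";
            default:          return "not a Tile ELF machine";
            }
    }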
diff --git a/arch/tile/include/asm/emergency-restart.h b/arch/tile/include/asm/emergency-restart.h new file mode 100644 index 00000000000..3711bd9d50b --- /dev/null +++ b/arch/tile/include/asm/emergency-restart.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/emergency-restart.h> | |||
diff --git a/arch/tile/include/asm/errno.h b/arch/tile/include/asm/errno.h new file mode 100644 index 00000000000..4c82b503d92 --- /dev/null +++ b/arch/tile/include/asm/errno.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/errno.h> | |||
diff --git a/arch/tile/include/asm/fcntl.h b/arch/tile/include/asm/fcntl.h new file mode 100644 index 00000000000..46ab12db573 --- /dev/null +++ b/arch/tile/include/asm/fcntl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/fcntl.h> | |||
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h new file mode 100644 index 00000000000..51537ff9265 --- /dev/null +++ b/arch/tile/include/asm/fixmap.h | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1998 Ingo Molnar | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_FIXMAP_H | ||
17 | #define _ASM_TILE_FIXMAP_H | ||
18 | |||
19 | #include <asm/page.h> | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | #include <linux/kernel.h> | ||
23 | #ifdef CONFIG_HIGHMEM | ||
24 | #include <linux/threads.h> | ||
25 | #include <asm/kmap_types.h> | ||
26 | #endif | ||
27 | |||
28 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
29 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
30 | |||
31 | /* | ||
32 | * Here we define all the compile-time 'special' virtual | ||
33 | * addresses. The point is to have a constant address at | ||
34 | * compile time, but to set the physical address only | ||
35 | * in the boot process. We allocate these special addresses | ||
36 | * from the end of supervisor virtual memory backwards. | ||
37 | * Also this lets us do fail-safe vmalloc(): we | ||
38 | * can guarantee that these special addresses and | ||
39 | * vmalloc()-ed addresses never overlap. | ||
40 | * | ||
41 | * These 'compile-time allocated' memory buffers are | ||
42 | * fixed-size 4k pages (or larger if used with an increment | ||
43 | * higher than 1). Use fixmap_set(idx, phys) to associate | ||
44 | * physical memory with fixmap indices. | ||
45 | * | ||
46 | * TLB entries of such buffers will not be flushed across | ||
47 | * task switches. | ||
48 | * | ||
49 | * We don't bother with a FIX_HOLE since above the fixmaps | ||
50 | * is unmapped memory in any case. | ||
51 | */ | ||
52 | enum fixed_addresses { | ||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
55 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
56 | #endif | ||
57 | __end_of_permanent_fixed_addresses, | ||
58 | |||
59 | /* | ||
60 | * Temporary boot-time mappings, used before ioremap() is functional. | ||
61 | * Not currently needed by the Tile architecture. | ||
62 | */ | ||
63 | #define NR_FIX_BTMAPS 0 | ||
64 | #if NR_FIX_BTMAPS | ||
65 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses, | ||
66 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, | ||
67 | __end_of_fixed_addresses | ||
68 | #else | ||
69 | __end_of_fixed_addresses = __end_of_permanent_fixed_addresses | ||
70 | #endif | ||
71 | }; | ||
72 | |||
73 | extern void __set_fixmap(enum fixed_addresses idx, | ||
74 | unsigned long phys, pgprot_t flags); | ||
75 | |||
76 | #define set_fixmap(idx, phys) \ | ||
77 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
78 | /* | ||
79 | * Some hardware wants to get fixmapped without caching. | ||
80 | */ | ||
81 | #define set_fixmap_nocache(idx, phys) \ | ||
82 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
83 | |||
84 | #define clear_fixmap(idx) \ | ||
85 | __set_fixmap(idx, 0, __pgprot(0)) | ||
86 | |||
87 | #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | ||
88 | #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
89 | #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) | ||
90 | #define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) | ||
91 | |||
92 | extern void __this_fixmap_does_not_exist(void); | ||
93 | |||
94 | /* | ||
95 | * 'index to address' translation. If anyone tries to use the idx | ||
96 | * directly without translation, we catch the bug with a NULL-dereference | ||
97 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
98 | */ | ||
99 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
100 | { | ||
101 | /* | ||
102 | * This branch gets completely eliminated after inlining, | ||
103 | * except when someone tries to use fixaddr indices in an | ||
104 | * illegal way (such as mixing up address types or using | ||
105 | * out-of-range indices). | ||
106 | * | ||
107 | * If it doesn't get removed, the linker will complain | ||
108 | * loudly with a reasonably clear error message. | ||
109 | */ | ||
110 | if (idx >= __end_of_fixed_addresses) | ||
111 | __this_fixmap_does_not_exist(); | ||
112 | |||
113 | return __fix_to_virt(idx); | ||
114 | } | ||
115 | |||
116 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
117 | { | ||
118 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
119 | return __virt_to_fix(vaddr); | ||
120 | } | ||
121 | |||
122 | #endif /* !__ASSEMBLY__ */ | ||
123 | |||
124 | #endif /* _ASM_TILE_FIXMAP_H */ | ||
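Under CONFIG_HIGHMEM the only permanent fixmap slots are the per-cpu kmap entries, and fix_to_virt() is just FIXADDR_TOP minus the index shifted by PAGE_SHIFT. A sketch of the index arithmetic, assuming the conventional kmap_atomic slot layout (one KM_TYPE_NR-sized group per cpu):

    static unsigned long example_kmap_slot_addr(int cpu, enum km_type type)
    {
            /* Slot for (cpu, type), counting from FIX_KMAP_BEGIN. */
            unsigned int idx = FIX_KMAP_BEGIN + type + cpu * KM_TYPE_NR;

            /* fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT) */
            return fix_to_virt(idx);
    }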
diff --git a/arch/tile/include/asm/ftrace.h b/arch/tile/include/asm/ftrace.h new file mode 100644 index 00000000000..461459b06d9 --- /dev/null +++ b/arch/tile/include/asm/ftrace.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_FTRACE_H | ||
16 | #define _ASM_TILE_FTRACE_H | ||
17 | |||
18 | /* empty */ | ||
19 | |||
20 | #endif /* _ASM_TILE_FTRACE_H */ | ||
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h new file mode 100644 index 00000000000..fe0d10dcae5 --- /dev/null +++ b/arch/tile/include/asm/futex.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * These routines make two important assumptions: | ||
15 | * | ||
16 | * 1. atomic_t is really an int and can be freely cast back and forth | ||
17 | * (validated in __init_atomic_per_cpu). | ||
18 | * | ||
19 | * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using | ||
20 | * the same locking convention that all the kernel atomic routines use. | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_TILE_FUTEX_H | ||
24 | #define _ASM_TILE_FUTEX_H | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | #include <linux/futex.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/errno.h> | ||
31 | |||
32 | extern struct __get_user futex_set(int __user *v, int i); | ||
33 | extern struct __get_user futex_add(int __user *v, int n); | ||
34 | extern struct __get_user futex_or(int __user *v, int n); | ||
35 | extern struct __get_user futex_andn(int __user *v, int n); | ||
36 | extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); | ||
37 | |||
38 | #ifndef __tilegx__ | ||
39 | extern struct __get_user futex_xor(int __user *v, int n); | ||
40 | #else | ||
41 | static inline struct __get_user futex_xor(int __user *uaddr, int n) | ||
42 | { | ||
43 | struct __get_user asm_ret = __get_user_4(uaddr); | ||
44 | if (!asm_ret.err) { | ||
45 | int oldval, newval; | ||
46 | do { | ||
47 | oldval = asm_ret.val; | ||
48 | newval = oldval ^ n; | ||
49 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | ||
50 | } while (asm_ret.err == 0 && oldval != asm_ret.val); | ||
51 | } | ||
52 | return asm_ret; | ||
53 | } | ||
54 | #endif | ||
55 | |||
56 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | ||
57 | { | ||
58 | int op = (encoded_op >> 28) & 7; | ||
59 | int cmp = (encoded_op >> 24) & 15; | ||
60 | int oparg = (encoded_op << 8) >> 20; | ||
61 | int cmparg = (encoded_op << 20) >> 20; | ||
62 | int ret; | ||
63 | struct __get_user asm_ret; | ||
64 | |||
65 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
66 | oparg = 1 << oparg; | ||
67 | |||
68 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
69 | return -EFAULT; | ||
70 | |||
71 | pagefault_disable(); | ||
72 | switch (op) { | ||
73 | case FUTEX_OP_SET: | ||
74 | asm_ret = futex_set(uaddr, oparg); | ||
75 | break; | ||
76 | case FUTEX_OP_ADD: | ||
77 | asm_ret = futex_add(uaddr, oparg); | ||
78 | break; | ||
79 | case FUTEX_OP_OR: | ||
80 | asm_ret = futex_or(uaddr, oparg); | ||
81 | break; | ||
82 | case FUTEX_OP_ANDN: | ||
83 | asm_ret = futex_andn(uaddr, oparg); | ||
84 | break; | ||
85 | case FUTEX_OP_XOR: | ||
86 | asm_ret = futex_xor(uaddr, oparg); | ||
87 | break; | ||
88 | default: | ||
89 | asm_ret.err = -ENOSYS; | ||
90 | } | ||
91 | pagefault_enable(); | ||
92 | |||
93 | ret = asm_ret.err; | ||
94 | |||
95 | if (!ret) { | ||
96 | switch (cmp) { | ||
97 | case FUTEX_OP_CMP_EQ: | ||
98 | ret = (asm_ret.val == cmparg); | ||
99 | break; | ||
100 | case FUTEX_OP_CMP_NE: | ||
101 | ret = (asm_ret.val != cmparg); | ||
102 | break; | ||
103 | case FUTEX_OP_CMP_LT: | ||
104 | ret = (asm_ret.val < cmparg); | ||
105 | break; | ||
106 | case FUTEX_OP_CMP_GE: | ||
107 | ret = (asm_ret.val >= cmparg); | ||
108 | break; | ||
109 | case FUTEX_OP_CMP_LE: | ||
110 | ret = (asm_ret.val <= cmparg); | ||
111 | break; | ||
112 | case FUTEX_OP_CMP_GT: | ||
113 | ret = (asm_ret.val > cmparg); | ||
114 | break; | ||
115 | default: | ||
116 | ret = -ENOSYS; | ||
117 | } | ||
118 | } | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | ||
123 | int newval) | ||
124 | { | ||
125 | struct __get_user asm_ret; | ||
126 | |||
127 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
128 | return -EFAULT; | ||
129 | |||
130 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | ||
131 | return asm_ret.err ? asm_ret.err : asm_ret.val; | ||
132 | } | ||
133 | |||
134 | #ifndef __tilegx__ | ||
135 | /* Return failure from the atomic wrappers. */ | ||
136 | struct __get_user __atomic_bad_address(int __user *addr); | ||
137 | #endif | ||
138 | |||
139 | #endif /* !__ASSEMBLY__ */ | ||
140 | |||
141 | #endif /* _ASM_TILE_FUTEX_H */ | ||
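The field extraction in futex_atomic_op_inuser() mirrors the packing done by the generic FUTEX_OP() macro: op in bits 31..28, cmp in bits 27..24, a sign-extended 12-bit oparg in bits 23..12, and cmparg in bits 11..0. A small worked sketch of the decode (the packed values are illustrative):

    static void example_decode_encoded_op(void)
    {
            int encoded_op = (FUTEX_OP_ADD << 28)    /* op               */
                           | (FUTEX_OP_CMP_EQ << 24) /* cmp              */
                           | (1 << 12)               /* oparg = 1        */
                           | 0;                      /* cmparg = 0       */

            int op     = (encoded_op >> 28) & 7;     /* FUTEX_OP_ADD     */
            int cmp    = (encoded_op >> 24) & 15;    /* FUTEX_OP_CMP_EQ  */
            int oparg  = (encoded_op << 8) >> 20;    /* 1, sign-extended */
            int cmparg = (encoded_op << 20) >> 20;   /* 0                */

            (void)op; (void)cmp; (void)oparg; (void)cmparg;
    }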
diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h new file mode 100644 index 00000000000..822390f9a15 --- /dev/null +++ b/arch/tile/include/asm/hardirq.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HARDIRQ_H | ||
16 | #define _ASM_TILE_HARDIRQ_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/cache.h> | ||
20 | |||
21 | #include <asm/irq.h> | ||
22 | |||
23 | typedef struct { | ||
24 | unsigned int __softirq_pending; | ||
25 | long idle_timestamp; | ||
26 | |||
27 | /* Hard interrupt statistics. */ | ||
28 | unsigned int irq_timer_count; | ||
29 | unsigned int irq_syscall_count; | ||
30 | unsigned int irq_resched_count; | ||
31 | unsigned int irq_hv_flush_count; | ||
32 | unsigned int irq_call_count; | ||
33 | unsigned int irq_hv_msg_count; | ||
34 | unsigned int irq_dev_intr_count; | ||
35 | |||
36 | } ____cacheline_aligned irq_cpustat_t; | ||
37 | |||
38 | DECLARE_PER_CPU(irq_cpustat_t, irq_stat); | ||
39 | |||
40 | #define __ARCH_IRQ_STAT | ||
41 | #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) | ||
42 | |||
43 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
44 | |||
45 | #define HARDIRQ_BITS 8 | ||
46 | |||
47 | #endif /* _ASM_TILE_HARDIRQ_H */ | ||
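A sketch of how an interrupt path might bump one of the per-cpu counters above via the __IRQ_STAT() accessor; smp_processor_id() is assumed from <linux/smp.h>, and the caller is assumed to run with the cpu pinned (e.g. in irq context):

    static void example_account_timer_irq(void)
    {
            __IRQ_STAT(smp_processor_id(), irq_timer_count)++;
    }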
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h new file mode 100644 index 00000000000..0bed3ec7b42 --- /dev/null +++ b/arch/tile/include/asm/hardwall.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Provide methods for the HARDWALL_FILE for accessing the UDN. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_HARDWALL_H | ||
18 | #define _ASM_TILE_HARDWALL_H | ||
19 | |||
20 | #include <linux/ioctl.h> | ||
21 | |||
22 | #define HARDWALL_IOCTL_BASE 0xa2 | ||
23 | |||
24 | /* | ||
25 | * The HARDWALL_CREATE() ioctl is a macro with a "size" argument. | ||
26 | * The resulting ioctl value is passed to the kernel in conjunction | ||
27 | * with a pointer to a little-endian bitmask of cpus, which must be | ||
28 | * physically in a rectangular configuration on the chip. | ||
29 | * The "size" is the number of bytes of cpu mask data. | ||
30 | */ | ||
31 | #define _HARDWALL_CREATE 1 | ||
32 | #define HARDWALL_CREATE(size) \ | ||
33 | _IOC(_IOC_READ, HARDWALL_IOCTL_BASE, _HARDWALL_CREATE, (size)) | ||
34 | |||
35 | #define _HARDWALL_ACTIVATE 2 | ||
36 | #define HARDWALL_ACTIVATE \ | ||
37 | _IO(HARDWALL_IOCTL_BASE, _HARDWALL_ACTIVATE) | ||
38 | |||
39 | #define _HARDWALL_DEACTIVATE 3 | ||
40 | #define HARDWALL_DEACTIVATE \ | ||
41 | _IO(HARDWALL_IOCTL_BASE, _HARDWALL_DEACTIVATE) | ||
42 | |||
43 | #ifndef __KERNEL__ | ||
44 | |||
45 | /* This is the canonical name expected by userspace. */ | ||
46 | #define HARDWALL_FILE "/dev/hardwall" | ||
47 | |||
48 | #else | ||
49 | |||
50 | /* Hook for /proc/tile/hardwall. */ | ||
51 | struct seq_file; | ||
52 | int proc_tile_hardwall_show(struct seq_file *sf, void *v); | ||
53 | |||
54 | #endif | ||
55 | |||
56 | #endif /* _ASM_TILE_HARDWALL_H */ | ||
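A userspace sketch of the create/activate sequence implied by the ioctls above, assuming this header is visible to userspace; the 8-byte mask size and the choice of cpus 0-3 are illustrative only (the mask must describe a rectangle of cpus):

    #include <stdint.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>

    int example_hardwall_setup(void)
    {
            uint64_t cpumask = 0xf;         /* little-endian bitmask: cpus 0-3 */
            int fd = open(HARDWALL_FILE, O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, HARDWALL_CREATE(sizeof(cpumask)), &cpumask) < 0)
                    return -1;
            if (ioctl(fd, HARDWALL_ACTIVATE) < 0)
                    return -1;
            return fd;                      /* keep open while using the UDN */
    }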
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h new file mode 100644 index 00000000000..efdd12e9102 --- /dev/null +++ b/arch/tile/include/asm/highmem.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
3 | * Gerhard.Wichert@pdb.siemens.de | ||
4 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation, version 2. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
17 | * are not addressable by direct kernel virtual addresses. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #ifndef _ASM_TILE_HIGHMEM_H | ||
22 | #define _ASM_TILE_HIGHMEM_H | ||
23 | |||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/threads.h> | ||
26 | #include <asm/kmap_types.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | #include <asm/homecache.h> | ||
29 | |||
30 | /* declarations for highmem.c */ | ||
31 | extern unsigned long highstart_pfn, highend_pfn; | ||
32 | |||
33 | extern pte_t *pkmap_page_table; | ||
34 | |||
35 | /* | ||
36 | * Ordering is: | ||
37 | * | ||
38 | * FIXADDR_TOP | ||
39 | * fixed_addresses | ||
40 | * FIXADDR_START | ||
41 | * temp fixed addresses | ||
42 | * FIXADDR_BOOT_START | ||
43 | * Persistent kmap area | ||
44 | * PKMAP_BASE | ||
45 | * VMALLOC_END | ||
46 | * Vmalloc area | ||
47 | * VMALLOC_START | ||
48 | * high_memory | ||
49 | */ | ||
50 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) | ||
51 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) | ||
52 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
53 | |||
54 | void *kmap_high(struct page *page); | ||
55 | void kunmap_high(struct page *page); | ||
56 | void *kmap(struct page *page); | ||
57 | void kunmap(struct page *page); | ||
58 | void *kmap_fix_kpte(struct page *page, int finished); | ||
59 | |||
60 | /* This macro is used only in map_new_virtual() to map "page". */ | ||
61 | #define kmap_prot page_to_kpgprot(page) | ||
62 | |||
63 | void kunmap_atomic(void *kvaddr, enum km_type type); | ||
64 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); | ||
65 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | ||
66 | struct page *kmap_atomic_to_page(void *ptr); | ||
67 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); | ||
68 | void *kmap_atomic(struct page *page, enum km_type type); | ||
69 | void kmap_atomic_fix_kpte(struct page *page, int finished); | ||
70 | |||
71 | #define flush_cache_kmaps() do { } while (0) | ||
72 | |||
73 | #endif /* _ASM_TILE_HIGHMEM_H */ | ||
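A sketch of the basic kmap()/kunmap() pairing declared above for touching a page that may live in high memory; memset() is assumed from <linux/string.h> and the function is hypothetical:

    static void example_zero_page(struct page *page)
    {
            void *va = kmap(page);          /* may sleep waiting for a pkmap slot */

            memset(va, 0, PAGE_SIZE);
            kunmap(page);
    }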
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h new file mode 100644 index 00000000000..a8243865d49 --- /dev/null +++ b/arch/tile/include/asm/homecache.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Handle issues around the Tile "home cache" model of coherence. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_HOMECACHE_H | ||
18 | #define _ASM_TILE_HOMECACHE_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | |||
23 | struct page; | ||
24 | struct task_struct; | ||
25 | struct vm_area_struct; | ||
26 | struct zone; | ||
27 | |||
28 | /* | ||
29 | * Coherence point for the page is its memory controller. | ||
30 | * It is not present in any cache (L1 or L2). | ||
31 | */ | ||
32 | #define PAGE_HOME_UNCACHED -1 | ||
33 | |||
34 | /* | ||
35 | * Is this page immutable (unwritable) and thus able to be cached more | ||
36 | * widely than would otherwise be possible? On tile64 this means we | ||
37 | * mark the PTE to cache locally; on tilepro it means we have "nc" set. | ||
38 | */ | ||
39 | #define PAGE_HOME_IMMUTABLE -2 | ||
40 | |||
41 | /* | ||
42 | * Each cpu considers its own cache to be the home for the page, | ||
43 | * which makes it incoherent. | ||
44 | */ | ||
45 | #define PAGE_HOME_INCOHERENT -3 | ||
46 | |||
47 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
48 | /* Home for the page is distributed via hash-for-home. */ | ||
49 | #define PAGE_HOME_HASH -4 | ||
50 | #endif | ||
51 | |||
52 | /* Homing is unknown or unspecified. Not valid for page_home(). */ | ||
53 | #define PAGE_HOME_UNKNOWN -5 | ||
54 | |||
55 | /* Home on the current cpu. Not valid for page_home(). */ | ||
56 | #define PAGE_HOME_HERE -6 | ||
57 | |||
58 | /* Support wrapper to use instead of explicit hv_flush_remote(). */ | ||
59 | extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length, | ||
60 | const struct cpumask *cache_cpumask, | ||
61 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
62 | unsigned long tlb_pgsize, | ||
63 | const struct cpumask *tlb_cpumask, | ||
64 | HV_Remote_ASID *asids, int asidcount); | ||
65 | |||
66 | /* Set homing-related bits in a PTE (can also pass a pgprot_t). */ | ||
67 | extern pte_t pte_set_home(pte_t pte, int home); | ||
68 | |||
69 | /* Do a cache eviction on the specified cpus. */ | ||
70 | extern void homecache_evict(const struct cpumask *mask); | ||
71 | |||
72 | /* | ||
73 | * Change a kernel page's homecache. It must not be mapped in user space. | ||
74 | * If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when | ||
75 | * no other cpu can reference the page, and causes a full-chip cache/TLB flush. | ||
76 | */ | ||
77 | extern void homecache_change_page_home(struct page *, int order, int home); | ||
78 | |||
79 | /* | ||
80 | * Flush a page out of whatever cache(s) it is in. | ||
81 | * This is more than just finv, since it properly handles waiting | ||
82 | * for the data to reach memory on tilepro, but it can be quite | ||
83 | * heavyweight, particularly on hash-for-home memory. | ||
84 | */ | ||
85 | extern void homecache_flush_cache(struct page *, int order); | ||
86 | |||
87 | /* | ||
88 | * Allocate a page with the given GFP flags, home, and optionally | ||
89 | * node. These routines are actually just wrappers around the normal | ||
90 | * alloc_pages() / alloc_pages_node() functions, which set and clear | ||
91 | * a per-cpu variable to communicate with homecache_new_kernel_page(). | ||
92 | * If !CONFIG_HOMECACHE, uses homecache_change_page_home(). | ||
93 | */ | ||
94 | extern struct page *homecache_alloc_pages(gfp_t gfp_mask, | ||
95 | unsigned int order, int home); | ||
96 | extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||
97 | unsigned int order, int home); | ||
98 | #define homecache_alloc_page(gfp_mask, home) \ | ||
99 | homecache_alloc_pages(gfp_mask, 0, home) | ||
100 | |||
101 | /* | ||
102 | * These routines are just pass-throughs to free_pages() when | ||
103 | * we support full homecaching. If !CONFIG_HOMECACHE, then these | ||
104 | * routines use homecache_change_page_home() to reset the home | ||
105 | * back to the default before returning the page to the allocator. | ||
106 | */ | ||
107 | void homecache_free_pages(unsigned long addr, unsigned int order); | ||
108 | #define homecache_free_page(page) \ | ||
109 | homecache_free_pages((page), 0) | ||
110 | |||
111 | |||
112 | |||
113 | /* | ||
114 | * Report the page home for LOWMEM pages by examining their kernel PTE, | ||
115 | * or for highmem pages as the default home. | ||
116 | */ | ||
117 | extern int page_home(struct page *); | ||
118 | |||
119 | #define homecache_migrate_kthread() do {} while (0) | ||
120 | |||
121 | #define homecache_kpte_lock() 0 | ||
122 | #define homecache_kpte_unlock(flags) do {} while (0) | ||
123 | |||
124 | |||
125 | #endif /* _ASM_TILE_HOMECACHE_H */ | ||
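A sketch of the allocate/flush/free lifecycle for a kernel page homed on one cpu's cache, using only the helpers declared above; GFP_KERNEL and page_address() are assumed from the usual mm headers, and note that homecache_free_page() as defined here takes a kernel virtual address:

    static void example_homed_page(int cpu)
    {
            struct page *page = homecache_alloc_page(GFP_KERNEL, cpu);

            if (!page)
                    return;
            /* ... touch page_address(page); lines are cached on 'cpu' ... */
            homecache_flush_cache(page, 0);         /* push the data back to memory */
            homecache_free_page((unsigned long)page_address(page));
    }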
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h new file mode 100644 index 00000000000..0521c277bbd --- /dev/null +++ b/arch/tile/include/asm/hugetlb.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HUGETLB_H | ||
16 | #define _ASM_TILE_HUGETLB_H | ||
17 | |||
18 | #include <asm/page.h> | ||
19 | |||
20 | |||
21 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
22 | unsigned long addr, | ||
23 | unsigned long len) { | ||
24 | return 0; | ||
25 | } | ||
26 | |||
27 | /* | ||
28 | * If the arch doesn't supply something else, assume that | ||
29 | * hugepage-size-aligned regions are ok without further preparation. | ||
30 | */ | ||
31 | static inline int prepare_hugepage_range(struct file *file, | ||
32 | unsigned long addr, unsigned long len) | ||
33 | { | ||
34 | struct hstate *h = hstate_file(file); | ||
35 | if (len & ~huge_page_mask(h)) | ||
36 | return -EINVAL; | ||
37 | if (addr & ~huge_page_mask(h)) | ||
38 | return -EINVAL; | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) | ||
43 | { | ||
44 | } | ||
45 | |||
46 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
47 | unsigned long addr, unsigned long end, | ||
48 | unsigned long floor, | ||
49 | unsigned long ceiling) | ||
50 | { | ||
51 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
52 | } | ||
53 | |||
54 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
55 | pte_t *ptep, pte_t pte) | ||
56 | { | ||
57 | set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER); | ||
58 | } | ||
59 | |||
60 | static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
61 | unsigned long addr, pte_t *ptep) | ||
62 | { | ||
63 | return ptep_get_and_clear(mm, addr, ptep); | ||
64 | } | ||
65 | |||
66 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
67 | unsigned long addr, pte_t *ptep) | ||
68 | { | ||
69 | ptep_clear_flush(vma, addr, ptep); | ||
70 | } | ||
71 | |||
72 | static inline int huge_pte_none(pte_t pte) | ||
73 | { | ||
74 | return pte_none(pte); | ||
75 | } | ||
76 | |||
77 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
78 | { | ||
79 | return pte_wrprotect(pte); | ||
80 | } | ||
81 | |||
82 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
83 | unsigned long addr, pte_t *ptep) | ||
84 | { | ||
85 | ptep_set_wrprotect(mm, addr, ptep); | ||
86 | } | ||
87 | |||
88 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
89 | unsigned long addr, pte_t *ptep, | ||
90 | pte_t pte, int dirty) | ||
91 | { | ||
92 | return ptep_set_access_flags(vma, addr, ptep, pte, dirty); | ||
93 | } | ||
94 | |||
95 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
96 | { | ||
97 | return *ptep; | ||
98 | } | ||
99 | |||
100 | static inline int arch_prepare_hugepage(struct page *page) | ||
101 | { | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static inline void arch_release_hugepage(struct page *page) | ||
106 | { | ||
107 | } | ||
108 | |||
109 | #endif /* _ASM_TILE_HUGETLB_H */ | ||
diff --git a/arch/tile/include/asm/hv_driver.h b/arch/tile/include/asm/hv_driver.h new file mode 100644 index 00000000000..ad614de899b --- /dev/null +++ b/arch/tile/include/asm/hv_driver.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This header defines a wrapper interface for managing hypervisor | ||
15 | * device calls that will result in an interrupt at some later time. | ||
16 | * In particular, this provides wrappers for hv_preada() and | ||
17 | * hv_pwritea(). | ||
18 | */ | ||
19 | |||
20 | #ifndef _ASM_TILE_HV_DRIVER_H | ||
21 | #define _ASM_TILE_HV_DRIVER_H | ||
22 | |||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | struct hv_driver_cb; | ||
26 | |||
27 | /* A callback to be invoked when an operation completes. */ | ||
28 | typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result); | ||
29 | |||
30 | /* | ||
31 | * A structure to hold information about an outstanding call. | ||
32 | * The driver must allocate a separate structure for each call. | ||
33 | */ | ||
34 | struct hv_driver_cb { | ||
35 | hv_driver_callback_t *callback; /* Function to call on interrupt. */ | ||
36 | void *dev; /* Driver-specific state variable. */ | ||
37 | }; | ||
38 | |||
39 | /* Wrapper for invoking hv_dev_preada(). */ | ||
40 | static inline int | ||
41 | tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
42 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
43 | struct hv_driver_cb *callback) | ||
44 | { | ||
45 | return hv_dev_preada(devhdl, flags, sgl_len, sgl, | ||
46 | offset, (HV_IntArg)callback); | ||
47 | } | ||
48 | |||
49 | /* Wrapper for invoking hv_dev_pwritea(). */ | ||
50 | static inline int | ||
51 | tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
52 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
53 | struct hv_driver_cb *callback) | ||
54 | { | ||
55 | return hv_dev_pwritea(devhdl, flags, sgl_len, sgl, | ||
56 | offset, (HV_IntArg)callback); | ||
57 | } | ||
58 | |||
59 | |||
60 | #endif /* _ASM_TILE_HV_DRIVER_H */ | ||
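A sketch of the completion-callback pattern the wrappers above expect; the state structure, its static lifetime, and the wake-up logic are illustrative assumptions:

    struct example_state {
            struct hv_driver_cb cb;         /* passed to the hypervisor call */
            int done;
    };

    static void example_done(struct hv_driver_cb *cb, __hv32 result)
    {
            struct example_state *st = cb->dev;     /* 'dev' points back at our state */

            st->done = 1;
            /* ... inspect 'result' and wake whoever is waiting ... */
    }

    static int example_start_read(int devhdl, HV_SGL *sgl, __hv32 sgl_len, __hv64 offset)
    {
            static struct example_state st;         /* illustrative static lifetime */

            st.cb.callback = example_done;
            st.cb.dev = &st;
            st.done = 0;
            return tile_hv_dev_preada(devhdl, 0, sgl_len, sgl, offset, &st.cb);
    }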
diff --git a/arch/tile/include/asm/hw_irq.h b/arch/tile/include/asm/hw_irq.h new file mode 100644 index 00000000000..4fac5fbf333 --- /dev/null +++ b/arch/tile/include/asm/hw_irq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HW_IRQ_H | ||
16 | #define _ASM_TILE_HW_IRQ_H | ||
17 | |||
18 | #endif /* _ASM_TILE_HW_IRQ_H */ | ||
diff --git a/arch/tile/include/asm/ide.h b/arch/tile/include/asm/ide.h new file mode 100644 index 00000000000..3c6f2ed894c --- /dev/null +++ b/arch/tile/include/asm/ide.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IDE_H | ||
16 | #define _ASM_TILE_IDE_H | ||
17 | |||
18 | /* For IDE on PCI */ | ||
19 | #define MAX_HWIFS 10 | ||
20 | |||
21 | #define ide_default_io_ctl(base) (0) | ||
22 | |||
23 | #include <asm-generic/ide_iops.h> | ||
24 | |||
25 | #endif /* _ASM_TILE_IDE_H */ | ||
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h new file mode 100644 index 00000000000..8c95bef3fa4 --- /dev/null +++ b/arch/tile/include/asm/io.h | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IO_H | ||
16 | #define _ASM_TILE_IO_H | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/bug.h> | ||
20 | #include <asm/page.h> | ||
21 | |||
22 | #define IO_SPACE_LIMIT 0xfffffffful | ||
23 | |||
24 | /* | ||
25 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
26 | * access. | ||
27 | */ | ||
28 | #define xlate_dev_mem_ptr(p) __va(p) | ||
29 | |||
30 | /* | ||
31 | * Convert a virtual cached pointer to an uncached pointer. | ||
32 | */ | ||
33 | #define xlate_dev_kmem_ptr(p) p | ||
34 | |||
35 | /* | ||
36 | * Change "struct page" to physical address. | ||
37 | */ | ||
38 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) | ||
39 | |||
40 | /* | ||
41 | * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to | ||
42 | * long before casting it to a pointer to avoid compiler warnings. | ||
43 | */ | ||
44 | #if CHIP_HAS_MMIO() | ||
45 | extern void __iomem *ioremap(resource_size_t offset, unsigned long size); | ||
46 | extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, | ||
47 | pgprot_t pgprot); | ||
48 | extern void iounmap(volatile void __iomem *addr); | ||
49 | #else | ||
50 | #define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr)) | ||
51 | #define iounmap(addr) ((void)0) | ||
52 | #endif | ||
53 | |||
54 | #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) | ||
55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) | ||
56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) | ||
57 | |||
58 | void __iomem *ioport_map(unsigned long port, unsigned int len); | ||
59 | extern inline void ioport_unmap(void __iomem *addr) {} | ||
60 | |||
61 | #define mmiowb() | ||
62 | |||
63 | /* Conversion between virtual and physical mappings. */ | ||
64 | #define mm_ptov(addr) ((void *)phys_to_virt(addr)) | ||
65 | #define mm_vtop(addr) ((unsigned long)virt_to_phys(addr)) | ||
66 | |||
67 | #ifdef CONFIG_PCI | ||
68 | |||
69 | extern u8 _tile_readb(unsigned long addr); | ||
70 | extern u16 _tile_readw(unsigned long addr); | ||
71 | extern u32 _tile_readl(unsigned long addr); | ||
72 | extern u64 _tile_readq(unsigned long addr); | ||
73 | extern void _tile_writeb(u8 val, unsigned long addr); | ||
74 | extern void _tile_writew(u16 val, unsigned long addr); | ||
75 | extern void _tile_writel(u32 val, unsigned long addr); | ||
76 | extern void _tile_writeq(u64 val, unsigned long addr); | ||
77 | |||
78 | #else | ||
79 | |||
80 | /* | ||
81 | * The Tile architecture does not support IOMEM unless PCI is enabled. | ||
82 | * Unfortunately we can't yet simply not declare these methods, | ||
83 | * since some generic code that is compiled into the kernel, but | ||
84 | * never run, uses them unconditionally. | ||
85 | */ | ||
86 | |||
87 | static inline int iomem_panic(void) | ||
88 | { | ||
89 | panic("readb/writeb and friends do not exist on tile without PCI"); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline u8 _tile_readb(unsigned long addr) | ||
94 | { | ||
95 | return iomem_panic(); | ||
96 | } | ||
97 | |||
98 | static inline u16 _tile_readw(unsigned long addr) | ||
99 | { | ||
100 | return iomem_panic(); | ||
101 | } | ||
102 | |||
103 | static inline u32 _tile_readl(unsigned long addr) | ||
104 | { | ||
105 | return iomem_panic(); | ||
106 | } | ||
107 | |||
108 | static inline u64 _tile_readq(unsigned long addr) | ||
109 | { | ||
110 | return iomem_panic(); | ||
111 | } | ||
112 | |||
113 | static inline void _tile_writeb(u8 val, unsigned long addr) | ||
114 | { | ||
115 | iomem_panic(); | ||
116 | } | ||
117 | |||
118 | static inline void _tile_writew(u16 val, unsigned long addr) | ||
119 | { | ||
120 | iomem_panic(); | ||
121 | } | ||
122 | |||
123 | static inline void _tile_writel(u32 val, unsigned long addr) | ||
124 | { | ||
125 | iomem_panic(); | ||
126 | } | ||
127 | |||
128 | static inline void _tile_writeq(u64 val, unsigned long addr) | ||
129 | { | ||
130 | iomem_panic(); | ||
131 | } | ||
132 | |||
133 | #endif | ||
134 | |||
135 | #define readb(addr) _tile_readb((unsigned long)addr) | ||
136 | #define readw(addr) _tile_readw((unsigned long)addr) | ||
137 | #define readl(addr) _tile_readl((unsigned long)addr) | ||
138 | #define readq(addr) _tile_readq((unsigned long)addr) | ||
139 | #define writeb(val, addr) _tile_writeb(val, (unsigned long)addr) | ||
140 | #define writew(val, addr) _tile_writew(val, (unsigned long)addr) | ||
141 | #define writel(val, addr) _tile_writel(val, (unsigned long)addr) | ||
142 | #define writeq(val, addr) _tile_writeq(val, (unsigned long)addr) | ||
143 | |||
144 | #define __raw_readb readb | ||
145 | #define __raw_readw readw | ||
146 | #define __raw_readl readl | ||
147 | #define __raw_readq readq | ||
148 | #define __raw_writeb writeb | ||
149 | #define __raw_writew writew | ||
150 | #define __raw_writel writel | ||
151 | #define __raw_writeq writeq | ||
152 | |||
153 | #define readb_relaxed readb | ||
154 | #define readw_relaxed readw | ||
155 | #define readl_relaxed readl | ||
156 | #define readq_relaxed readq | ||
157 | |||
158 | #define ioread8 readb | ||
159 | #define ioread16 readw | ||
160 | #define ioread32 readl | ||
161 | #define ioread64 readq | ||
162 | #define iowrite8 writeb | ||
163 | #define iowrite16 writew | ||
164 | #define iowrite32 writel | ||
165 | #define iowrite64 writeq | ||
166 | |||
167 | static inline void *memcpy_fromio(void *dst, void *src, int len) | ||
168 | { | ||
169 | int x; | ||
170 | BUG_ON((unsigned long)src & 0x3); | ||
171 | for (x = 0; x < len; x += 4) | ||
172 | *(u32 *)(dst + x) = readl(src + x); | ||
173 | return dst; | ||
174 | } | ||
175 | |||
176 | static inline void *memcpy_toio(void *dst, void *src, int len) | ||
177 | { | ||
178 | int x; | ||
179 | BUG_ON((unsigned long)dst & 0x3); | ||
180 | for (x = 0; x < len; x += 4) | ||
181 | writel(*(u32 *)(src + x), dst + x); | ||
182 | return dst; | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * The Tile architecture does not support IOPORT, even with PCI. | ||
187 | * Unfortunately we can't yet simply not declare these methods, | ||
188 | * since some generic code that is compiled into the kernel, but | ||
189 | * never run, uses them unconditionally. | ||
190 | */ | ||
191 | |||
192 | static inline int ioport_panic(void) | ||
193 | { | ||
194 | panic("inb/outb and friends do not exist on tile"); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static inline u8 inb(unsigned long addr) | ||
199 | { | ||
200 | return ioport_panic(); | ||
201 | } | ||
202 | |||
203 | static inline u16 inw(unsigned long addr) | ||
204 | { | ||
205 | return ioport_panic(); | ||
206 | } | ||
207 | |||
208 | static inline u32 inl(unsigned long addr) | ||
209 | { | ||
210 | return ioport_panic(); | ||
211 | } | ||
212 | |||
213 | static inline void outb(u8 b, unsigned long addr) | ||
214 | { | ||
215 | ioport_panic(); | ||
216 | } | ||
217 | |||
218 | static inline void outw(u16 b, unsigned long addr) | ||
219 | { | ||
220 | ioport_panic(); | ||
221 | } | ||
222 | |||
223 | static inline void outl(u32 b, unsigned long addr) | ||
224 | { | ||
225 | ioport_panic(); | ||
226 | } | ||
227 | |||
228 | #define inb_p(addr) inb(addr) | ||
229 | #define inw_p(addr) inw(addr) | ||
230 | #define inl_p(addr) inl(addr) | ||
231 | #define outb_p(x, addr) outb((x), (addr)) | ||
232 | #define outw_p(x, addr) outw((x), (addr)) | ||
233 | #define outl_p(x, addr) outl((x), (addr)) | ||
234 | |||
235 | static inline void insb(unsigned long addr, void *buffer, int count) | ||
236 | { | ||
237 | ioport_panic(); | ||
238 | } | ||
239 | |||
240 | static inline void insw(unsigned long addr, void *buffer, int count) | ||
241 | { | ||
242 | ioport_panic(); | ||
243 | } | ||
244 | |||
245 | static inline void insl(unsigned long addr, void *buffer, int count) | ||
246 | { | ||
247 | ioport_panic(); | ||
248 | } | ||
249 | |||
250 | static inline void outsb(unsigned long addr, const void *buffer, int count) | ||
251 | { | ||
252 | ioport_panic(); | ||
253 | } | ||
254 | |||
255 | static inline void outsw(unsigned long addr, const void *buffer, int count) | ||
256 | { | ||
257 | ioport_panic(); | ||
258 | } | ||
259 | |||
260 | static inline void outsl(unsigned long addr, const void *buffer, int count) | ||
261 | { | ||
262 | ioport_panic(); | ||
263 | } | ||
264 | |||
265 | #define ioread8_rep(p, dst, count) \ | ||
266 | insb((unsigned long) (p), (dst), (count)) | ||
267 | #define ioread16_rep(p, dst, count) \ | ||
268 | insw((unsigned long) (p), (dst), (count)) | ||
269 | #define ioread32_rep(p, dst, count) \ | ||
270 | insl((unsigned long) (p), (dst), (count)) | ||
271 | |||
272 | #define iowrite8_rep(p, src, count) \ | ||
273 | outsb((unsigned long) (p), (src), (count)) | ||
274 | #define iowrite16_rep(p, src, count) \ | ||
275 | outsw((unsigned long) (p), (src), (count)) | ||
276 | #define iowrite32_rep(p, src, count) \ | ||
277 | outsl((unsigned long) (p), (src), (count)) | ||
278 | |||
279 | #endif /* _ASM_TILE_IO_H */ | ||
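As a rough illustration of how a driver consumes the MMIO interface declared above, here is a minimal sketch; the device, its physical base address, and the register offsets are invented for the example. It maps a region with ioremap() and accesses it through readl()/writel(), which on tile resolve to the _tile_readl()/_tile_writel() helpers when CONFIG_PCI is set.

/* Hypothetical driver fragment; base address and register offsets are made up. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/io.h>

#define EXAMPLE_MMIO_BASE	0x10000000UL	/* assumed device base address */
#define EXAMPLE_MMIO_SIZE	0x1000
#define EXAMPLE_REG_CTRL	0x00
#define EXAMPLE_REG_STAT	0x04

static int example_probe(void)
{
	void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	u32 status;

	if (!regs)
		return -ENOMEM;

	writel(1, regs + EXAMPLE_REG_CTRL);	/* hypothetical "enable" bit */
	status = readl(regs + EXAMPLE_REG_STAT);

	iounmap(regs);
	return status ? 0 : -EIO;
}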
diff --git a/arch/tile/include/asm/ioctl.h b/arch/tile/include/asm/ioctl.h new file mode 100644 index 00000000000..b279fe06dfe --- /dev/null +++ b/arch/tile/include/asm/ioctl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctl.h> | |||
diff --git a/arch/tile/include/asm/ioctls.h b/arch/tile/include/asm/ioctls.h new file mode 100644 index 00000000000..ec34c760665 --- /dev/null +++ b/arch/tile/include/asm/ioctls.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctls.h> | |||
diff --git a/arch/tile/include/asm/ipc.h b/arch/tile/include/asm/ipc.h new file mode 100644 index 00000000000..a46e3d9c2a3 --- /dev/null +++ b/arch/tile/include/asm/ipc.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipc.h> | |||
diff --git a/arch/tile/include/asm/ipcbuf.h b/arch/tile/include/asm/ipcbuf.h new file mode 100644 index 00000000000..84c7e51cb6d --- /dev/null +++ b/arch/tile/include/asm/ipcbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipcbuf.h> | |||
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h new file mode 100644 index 00000000000..572fd3ef1d7 --- /dev/null +++ b/arch/tile/include/asm/irq.h | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQ_H | ||
16 | #define _ASM_TILE_IRQ_H | ||
17 | |||
18 | #include <linux/hardirq.h> | ||
19 | |||
20 | /* The hypervisor interface provides 32 IRQs. */ | ||
21 | #define NR_IRQS 32 | ||
22 | |||
23 | /* IRQ numbers used for linux IPIs. */ | ||
24 | #define IRQ_RESCHEDULE 1 | ||
25 | |||
26 | void ack_bad_irq(unsigned int irq); | ||
27 | |||
28 | /* | ||
29 | * Different ways of handling interrupts. Tile interrupts are always | ||
30 | * per-cpu; there is no global interrupt controller to implement | ||
31 | * enable/disable. Most onboard devices can send their interrupts to | ||
32 | * many tiles at the same time, and Tile-specific drivers know how to | ||
33 | * deal with this. | ||
34 | * | ||
35 | * However, generic devices (usually PCIE based, sometimes GPIO) | ||
36 | * expect that interrupts will fire on a single core at a time and | ||
37 | * that the irq can be enabled or disabled from any core at any time. | ||
38 | * We implement this by directing such interrupts to a single core. | ||
39 | * | ||
40 | * One added wrinkle is that PCI interrupts can be either | ||
41 | * hardware-cleared (legacy interrupts) or software cleared (MSI). | ||
42 | * Other generic device systems (GPIO) are always software-cleared. | ||
43 | * | ||
44 | * The enums below are used by drivers for onboard devices, including | ||
45 | * the internals of PCI root complex and GPIO. They allow the driver | ||
46 | * to tell the generic irq code what kind of interrupt is mapped to a | ||
47 | * particular IRQ number. | ||
48 | */ | ||
49 | enum { | ||
50 | /* per-cpu interrupt; use enable/disable_percpu_irq() to mask */ | ||
51 | TILE_IRQ_PERCPU, | ||
52 | /* global interrupt, hardware responsible for clearing. */ | ||
53 | TILE_IRQ_HW_CLEAR, | ||
54 | /* global interrupt, software responsible for clearing. */ | ||
55 | TILE_IRQ_SW_CLEAR, | ||
56 | }; | ||
57 | |||
58 | |||
59 | /* | ||
60 | * Paravirtualized drivers should call this when they dynamically | ||
61 | * allocate a new IRQ or discover an IRQ that was pre-allocated by the | ||
62 | * hypervisor for use with their particular device. This gives the | ||
63 | * IRQ subsystem an opportunity to do interrupt-type-specific | ||
64 | * initialization. | ||
65 | * | ||
66 | * ISSUE: We should modify this API so that registering anything | ||
67 | * except percpu interrupts also requires providing callback methods | ||
68 | * for enabling and disabling the interrupt. This would allow the | ||
69 | * generic IRQ code to proxy enable/disable_irq() calls back into the | ||
70 | * PCI subsystem, which in turn could enable or disable the interrupt | ||
71 | * at the PCI shim. | ||
72 | */ | ||
73 | void tile_irq_activate(unsigned int irq, int tile_irq_type); | ||
74 | |||
75 | /* | ||
76 | * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know | ||
77 | * how to use enable/disable_percpu_irq() to manage interrupts on each | ||
78 | * core. We can't use the generic enable/disable_irq() because they | ||
79 | * use a single reference count per irq, rather than per cpu per irq. | ||
80 | */ | ||
81 | void enable_percpu_irq(unsigned int irq); | ||
82 | void disable_percpu_irq(unsigned int irq); | ||
83 | |||
84 | |||
85 | void setup_irq_regs(void); | ||
86 | |||
87 | #endif /* _ASM_TILE_IRQ_H */ | ||
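A hedged sketch of how an onboard-device driver might use the per-cpu IRQ interface above; the IRQ number, device name, and handler are hypothetical. The driver registers a handler, tells the tile IRQ code the interrupt is per-cpu via tile_irq_activate(), and then unmasks it on the local core with enable_percpu_irq().

/* Hypothetical driver fragment; the IRQ number and handler are invented. */
#include <linux/interrupt.h>
#include <asm/irq.h>

#define EXAMPLE_DEV_IRQ 10	/* assumed hypervisor-assigned IRQ number */

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* Device-specific acknowledgment/handling would go here. */
	return IRQ_HANDLED;
}

static int example_setup_irq(void)
{
	int rc = request_irq(EXAMPLE_DEV_IRQ, example_isr, 0, "example", NULL);

	if (rc)
		return rc;

	tile_irq_activate(EXAMPLE_DEV_IRQ, TILE_IRQ_PERCPU);
	enable_percpu_irq(EXAMPLE_DEV_IRQ);	/* unmask on the calling core */
	return 0;
}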
diff --git a/arch/tile/include/asm/irq_regs.h b/arch/tile/include/asm/irq_regs.h new file mode 100644 index 00000000000..3dd9c0b7027 --- /dev/null +++ b/arch/tile/include/asm/irq_regs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/irq_regs.h> | |||
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h new file mode 100644 index 00000000000..45cf67c2f28 --- /dev/null +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -0,0 +1,266 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQFLAGS_H | ||
16 | #define _ASM_TILE_IRQFLAGS_H | ||
17 | |||
18 | #include <arch/interrupts.h> | ||
19 | #include <arch/chip.h> | ||
20 | |||
21 | /* | ||
22 | * The set of interrupts we want to allow when interrupts are nominally | ||
23 | * disabled. The remainder are effectively "NMI" interrupts from | ||
24 | * the point of view of the generic Linux code. Note that synchronous | ||
25 | * interrupts (aka "non-queued") are not blocked by the mask in any case. | ||
26 | */ | ||
27 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
28 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
29 | (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) | ||
30 | #else | ||
31 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
32 | (~(INT_MASK(INT_PERF_COUNT))) | ||
33 | #endif | ||
34 | |||
35 | #ifndef __ASSEMBLY__ | ||
36 | |||
37 | /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */ | ||
38 | #include <asm/percpu.h> | ||
39 | #include <arch/spr_def.h> | ||
40 | |||
41 | /* Set and clear kernel interrupt masks. */ | ||
42 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
43 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 | ||
44 | # error Fix assumptions about which word various interrupts are in | ||
45 | #endif | ||
46 | #define interrupt_mask_set(n) do { \ | ||
47 | int __n = (n); \ | ||
48 | int __mask = 1 << (__n & 0x1f); \ | ||
49 | if (__n < 32) \ | ||
50 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \ | ||
51 | else \ | ||
52 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \ | ||
53 | } while (0) | ||
54 | #define interrupt_mask_reset(n) do { \ | ||
55 | int __n = (n); \ | ||
56 | int __mask = 1 << (__n & 0x1f); \ | ||
57 | if (__n < 32) \ | ||
58 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \ | ||
59 | else \ | ||
60 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \ | ||
61 | } while (0) | ||
62 | #define interrupt_mask_check(n) ({ \ | ||
63 | int __n = (n); \ | ||
64 | (((__n < 32) ? \ | ||
65 | __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \ | ||
66 | __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \ | ||
67 | >> (__n & 0x1f)) & 1; \ | ||
68 | }) | ||
69 | #define interrupt_mask_set_mask(mask) do { \ | ||
70 | unsigned long long __m = (mask); \ | ||
71 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \ | ||
72 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \ | ||
73 | } while (0) | ||
74 | #define interrupt_mask_reset_mask(mask) do { \ | ||
75 | unsigned long long __m = (mask); \ | ||
76 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \ | ||
77 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \ | ||
78 | } while (0) | ||
79 | #else | ||
80 | #define interrupt_mask_set(n) \ | ||
81 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n))) | ||
82 | #define interrupt_mask_reset(n) \ | ||
83 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n))) | ||
84 | #define interrupt_mask_check(n) \ | ||
85 | ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1) | ||
86 | #define interrupt_mask_set_mask(mask) \ | ||
87 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask)) | ||
88 | #define interrupt_mask_reset_mask(mask) \ | ||
89 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask)) | ||
90 | #endif | ||
91 | |||
92 | /* | ||
93 | * The set of interrupts we want active if irqs are enabled. | ||
94 | * Note that in particular, the tile timer interrupt comes and goes | ||
95 | * from this set, since we have no other way to turn off the timer. | ||
96 | * Likewise, INTCTRL_1 is removed and re-added during device | ||
97 | * interrupts, as is the hardwall UDN_FIREWALL interrupt. | ||
98 | * We use a low bit (MEM_ERROR) as our sentinel value and make sure it | ||
99 | * is always claimed as an "active interrupt" so we can query that bit | ||
100 | * to know our current state. | ||
101 | */ | ||
102 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | ||
103 | #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) | ||
104 | |||
105 | /* Disable interrupts. */ | ||
106 | #define raw_local_irq_disable() \ | ||
107 | interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) | ||
108 | |||
109 | /* Disable all interrupts, including NMIs. */ | ||
110 | #define raw_local_irq_disable_all() \ | ||
111 | interrupt_mask_set_mask(-1UL) | ||
112 | |||
113 | /* Re-enable all maskable interrupts. */ | ||
114 | #define raw_local_irq_enable() \ | ||
115 | interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) | ||
116 | |||
117 | /* Disable or enable interrupts based on flag argument. */ | ||
118 | #define raw_local_irq_restore(disabled) do { \ | ||
119 | if (disabled) \ | ||
120 | raw_local_irq_disable(); \ | ||
121 | else \ | ||
122 | raw_local_irq_enable(); \ | ||
123 | } while (0) | ||
124 | |||
125 | /* Return true if "flags" argument means interrupts are disabled. */ | ||
126 | #define raw_irqs_disabled_flags(flags) ((flags) != 0) | ||
127 | |||
128 | /* Return true if interrupts are currently disabled. */ | ||
129 | #define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) | ||
130 | |||
131 | /* Save whether interrupts are currently disabled. */ | ||
132 | #define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) | ||
133 | |||
134 | /* Save whether interrupts are currently disabled, then disable them. */ | ||
135 | #define raw_local_irq_save(flags) \ | ||
136 | do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) | ||
137 | |||
138 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ | ||
139 | #define raw_local_irq_mask(interrupt) \ | ||
140 | (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) | ||
141 | |||
142 | /* Prevent the given interrupt from being enabled immediately. */ | ||
143 | #define raw_local_irq_mask_now(interrupt) do { \ | ||
144 | raw_local_irq_mask(interrupt); \ | ||
145 | interrupt_mask_set(interrupt); \ | ||
146 | } while (0) | ||
147 | |||
148 | /* Allow the given interrupt to be enabled next time we enable irqs. */ | ||
149 | #define raw_local_irq_unmask(interrupt) \ | ||
150 | (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) | ||
151 | |||
152 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ | ||
153 | #define raw_local_irq_unmask_now(interrupt) do { \ | ||
154 | raw_local_irq_unmask(interrupt); \ | ||
155 | if (!irqs_disabled()) \ | ||
156 | interrupt_mask_reset(interrupt); \ | ||
157 | } while (0) | ||
158 | |||
159 | #else /* __ASSEMBLY__ */ | ||
160 | |||
161 | /* We provide a somewhat more restricted set for assembly. */ | ||
162 | |||
163 | #ifdef __tilegx__ | ||
164 | |||
165 | #if INT_MEM_ERROR != 0 | ||
166 | # error Fix IRQ_DISABLED() macro | ||
167 | #endif | ||
168 | |||
169 | /* Return 0 or 1 to indicate whether interrupts are currently disabled. */ | ||
170 | #define IRQS_DISABLED(tmp) \ | ||
171 | mfspr tmp, INTERRUPT_MASK_1; \ | ||
172 | andi tmp, tmp, 1 | ||
173 | |||
174 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
175 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
176 | moveli reg, hw2_last(interrupts_enabled_mask); \ | ||
177 | shl16insli reg, reg, hw1(interrupts_enabled_mask); \ | ||
178 | shl16insli reg, reg, hw0(interrupts_enabled_mask); \ | ||
179 | add reg, reg, tp | ||
180 | |||
181 | /* Disable interrupts. */ | ||
182 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
183 | moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \ | ||
184 | shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \ | ||
185 | shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \ | ||
186 | mtspr INTERRUPT_MASK_SET_1, tmp0 | ||
187 | |||
188 | /* Disable ALL synchronous interrupts (used by NMI entry). */ | ||
189 | #define IRQ_DISABLE_ALL(tmp) \ | ||
190 | movei tmp, -1; \ | ||
191 | mtspr INTERRUPT_MASK_SET_1, tmp | ||
192 | |||
193 | /* Enable interrupts. */ | ||
194 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
195 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
196 | ld tmp0, tmp0; \ | ||
197 | mtspr INTERRUPT_MASK_RESET_1, tmp0 | ||
198 | |||
199 | #else /* !__tilegx__ */ | ||
200 | |||
201 | /* | ||
202 | * Return 0 or 1 to indicate whether interrupts are currently disabled. | ||
203 | * Note that it's important that we use a bit from the "low" mask word, | ||
204 | * since when we are enabling, that is the word we write first, so if we | ||
205 | * are interrupted after only writing half of the mask, the interrupt | ||
206 | * handler will correctly observe that we have interrupts enabled, and | ||
207 | * will enable interrupts itself on return from the interrupt handler | ||
208 | * (making the original code's write of the "high" mask word idempotent). | ||
209 | */ | ||
210 | #define IRQS_DISABLED(tmp) \ | ||
211 | mfspr tmp, INTERRUPT_MASK_1_0; \ | ||
212 | shri tmp, tmp, INT_MEM_ERROR; \ | ||
213 | andi tmp, tmp, 1 | ||
214 | |||
215 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
216 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
217 | moveli reg, lo16(interrupts_enabled_mask); \ | ||
218 | auli reg, reg, ha16(interrupts_enabled_mask);\ | ||
219 | add reg, reg, tp | ||
220 | |||
221 | /* Disable interrupts. */ | ||
222 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
223 | { \ | ||
224 | movei tmp0, -1; \ | ||
225 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \ | ||
226 | }; \ | ||
227 | { \ | ||
228 | mtspr INTERRUPT_MASK_SET_1_0, tmp0; \ | ||
229 | auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \ | ||
230 | }; \ | ||
231 | mtspr INTERRUPT_MASK_SET_1_1, tmp1 | ||
232 | |||
233 | /* Disable ALL synchronous interrupts (used by NMI entry). */ | ||
234 | #define IRQ_DISABLE_ALL(tmp) \ | ||
235 | movei tmp, -1; \ | ||
236 | mtspr INTERRUPT_MASK_SET_1_0, tmp; \ | ||
237 | mtspr INTERRUPT_MASK_SET_1_1, tmp | ||
238 | |||
239 | /* Enable interrupts. */ | ||
240 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
241 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
242 | { \ | ||
243 | lw tmp0, tmp0; \ | ||
244 | addi tmp1, tmp0, 4 \ | ||
245 | }; \ | ||
246 | lw tmp1, tmp1; \ | ||
247 | mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \ | ||
248 | mtspr INTERRUPT_MASK_RESET_1_1, tmp1 | ||
249 | #endif | ||
250 | |||
251 | /* | ||
252 | * Do the CPU's IRQ-state tracing from assembly code. We call a | ||
253 | * C function, but almost everywhere we do, we don't mind clobbering | ||
254 | * all the caller-saved registers. | ||
255 | */ | ||
256 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
257 | # define TRACE_IRQS_ON jal trace_hardirqs_on | ||
258 | # define TRACE_IRQS_OFF jal trace_hardirqs_off | ||
259 | #else | ||
260 | # define TRACE_IRQS_ON | ||
261 | # define TRACE_IRQS_OFF | ||
262 | #endif | ||
263 | |||
264 | #endif /* __ASSEMBLY__ */ | ||
265 | |||
266 | #endif /* _ASM_TILE_IRQFLAGS_H */ | ||
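To make the flag convention concrete: raw_local_save_flags() records a boolean (nonzero means "interrupts were disabled"), so the usual save/disable/restore pattern looks like the minimal sketch below. This is illustrative only; callers normally go through the generic local_irq_save()/local_irq_restore() wrappers.

/* Illustrative only: the save/disable/restore pattern on top of these macros. */
#include <asm/irqflags.h>

static unsigned long example_counter;

static void example_critical_update(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* flags = 1 if already disabled, else 0 */
	example_counter++;		/* critical section */
	raw_local_irq_restore(flags);	/* re-enable only if previously enabled */
}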
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h new file mode 100644 index 00000000000..6ece1b03766 --- /dev/null +++ b/arch/tile/include/asm/kdebug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/kdebug.h> | |||
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h new file mode 100644 index 00000000000..c11a6cc73bb --- /dev/null +++ b/arch/tile/include/asm/kexec.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * based on kexec.h from other architectures in linux-2.6.18 | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_KEXEC_H | ||
18 | #define _ASM_TILE_KEXEC_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | |||
22 | /* Maximum physical address we can use pages from. */ | ||
23 | #define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE | ||
24 | /* Maximum address we can reach in physical address mode. */ | ||
25 | #define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE | ||
26 | /* Maximum address we can use for the control code buffer. */ | ||
27 | #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE | ||
28 | |||
29 | #define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE | ||
30 | |||
31 | /* | ||
32 | * We don't bother to provide a unique identifier, since we can only | ||
33 | * reboot with a single type of kernel image anyway. | ||
34 | */ | ||
35 | #define KEXEC_ARCH KEXEC_ARCH_DEFAULT | ||
36 | |||
37 | /* Use the tile override for the page allocator. */ | ||
38 | struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order); | ||
39 | #define kimage_alloc_pages_arch kimage_alloc_pages_arch | ||
40 | |||
41 | #define MAX_NOTE_BYTES 1024 | ||
42 | |||
43 | /* Defined in arch/tile/kernel/relocate_kernel.S */ | ||
44 | extern const unsigned char relocate_new_kernel[]; | ||
45 | extern const unsigned long relocate_new_kernel_size; | ||
46 | extern void relocate_new_kernel_end(void); | ||
47 | |||
48 | /* Provide a dummy definition to avoid build failures. */ | ||
49 | static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | #endif /* _ASM_TILE_KEXEC_H */ | ||
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h new file mode 100644 index 00000000000..1480106d1c0 --- /dev/null +++ b/arch/tile/include/asm/kmap_types.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_KMAP_TYPES_H | ||
16 | #define _ASM_TILE_KMAP_TYPES_H | ||
17 | |||
18 | /* | ||
19 | * In TILE Linux each set of four of these uses another 16MB chunk of | ||
20 | * address space, given 64 tiles and 64KB pages, so we only enable | ||
21 | * ones that are required by the kernel configuration. | ||
22 | */ | ||
23 | enum km_type { | ||
24 | KM_BOUNCE_READ, | ||
25 | KM_SKB_SUNRPC_DATA, | ||
26 | KM_SKB_DATA_SOFTIRQ, | ||
27 | KM_USER0, | ||
28 | KM_USER1, | ||
29 | KM_BIO_SRC_IRQ, | ||
30 | KM_IRQ0, | ||
31 | KM_IRQ1, | ||
32 | KM_SOFTIRQ0, | ||
33 | KM_SOFTIRQ1, | ||
34 | KM_MEMCPY0, | ||
35 | KM_MEMCPY1, | ||
36 | #if defined(CONFIG_HIGHPTE) | ||
37 | KM_PTE0, | ||
38 | KM_PTE1, | ||
39 | #endif | ||
40 | KM_TYPE_NR | ||
41 | }; | ||
42 | |||
43 | #endif /* _ASM_TILE_KMAP_TYPES_H */ | ||
diff --git a/arch/tile/include/asm/linkage.h b/arch/tile/include/asm/linkage.h new file mode 100644 index 00000000000..e121c39751a --- /dev/null +++ b/arch/tile/include/asm/linkage.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_LINKAGE_H | ||
16 | #define _ASM_TILE_LINKAGE_H | ||
17 | |||
18 | #include <feedback.h> | ||
19 | |||
20 | #define __ALIGN .align 8 | ||
21 | |||
22 | /* | ||
23 | * The STD_ENTRY and STD_ENDPROC macros put the function in a | ||
24 | * self-named .text.foo section, and if linker feedback collection | ||
25 | * is enabled, add a suitable call to the feedback collection code. | ||
26 | * STD_ENTRY_SECTION lets you specify a non-standard section name. | ||
27 | */ | ||
28 | |||
29 | #define STD_ENTRY(name) \ | ||
30 | .pushsection .text.##name, "ax"; \ | ||
31 | ENTRY(name); \ | ||
32 | FEEDBACK_ENTER(name) | ||
33 | |||
34 | #define STD_ENTRY_SECTION(name, section) \ | ||
35 | .pushsection section, "ax"; \ | ||
36 | ENTRY(name); \ | ||
37 | FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name) | ||
38 | |||
39 | #define STD_ENDPROC(name) \ | ||
40 | ENDPROC(name); \ | ||
41 | .Lend_##name:; \ | ||
42 | .popsection | ||
43 | |||
44 | /* Create a file-static function entry set up for feedback gathering. */ | ||
45 | #define STD_ENTRY_LOCAL(name) \ | ||
46 | .pushsection .text.##name, "ax"; \ | ||
47 | ALIGN; \ | ||
48 | name:; \ | ||
49 | FEEDBACK_ENTER(name) | ||
50 | |||
51 | #endif /* _ASM_TILE_LINKAGE_H */ | ||
diff --git a/arch/tile/include/asm/local.h b/arch/tile/include/asm/local.h new file mode 100644 index 00000000000..c11c530f74d --- /dev/null +++ b/arch/tile/include/asm/local.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/local.h> | |||
diff --git a/arch/tile/include/asm/memprof.h b/arch/tile/include/asm/memprof.h new file mode 100644 index 00000000000..359949be28c --- /dev/null +++ b/arch/tile/include/asm/memprof.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * The hypervisor's memory controller profiling infrastructure allows | ||
15 | * the programmer to find out what fraction of the available memory | ||
16 | * bandwidth is being consumed at each memory controller. The | ||
17 | * profiler provides start, stop, and clear operations to allow | ||
18 | * profiling over a specific time window, as well as an interface for | ||
19 | * reading the most recent profile values. | ||
20 | * | ||
21 | * This header declares IOCTL codes necessary to control memprof. | ||
22 | */ | ||
23 | #ifndef _ASM_TILE_MEMPROF_H | ||
24 | #define _ASM_TILE_MEMPROF_H | ||
25 | |||
26 | #include <linux/ioctl.h> | ||
27 | |||
28 | #define MEMPROF_IOCTL_TYPE 0xB4 | ||
29 | #define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0) | ||
30 | #define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1) | ||
31 | #define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2) | ||
32 | |||
33 | #endif /* _ASM_TILE_MEMPROF_H */ | ||
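User space drives the profiler purely through these ioctl codes. A minimal sketch, assuming the hypervisor driver exposes a character device; the /dev node name below is an assumption, not something this header defines.

/* Userspace sketch; the device path is hypothetical. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/memprof.h>

int main(void)
{
	int fd = open("/dev/memprof", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, MEMPROF_IOCTL_CLEAR);		/* reset the counters */
	ioctl(fd, MEMPROF_IOCTL_START);		/* open the profiling window */
	sleep(1);				/* run the workload of interest */
	ioctl(fd, MEMPROF_IOCTL_STOP);		/* close the profiling window */
	close(fd);
	return 0;
}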
diff --git a/arch/tile/include/asm/mman.h b/arch/tile/include/asm/mman.h new file mode 100644 index 00000000000..4c6811e3e8d --- /dev/null +++ b/arch/tile/include/asm/mman.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMAN_H | ||
16 | #define _ASM_TILE_MMAN_H | ||
17 | |||
18 | #include <asm-generic/mman-common.h> | ||
19 | #include <arch/chip.h> | ||
20 | |||
21 | /* Standard Linux flags */ | ||
22 | |||
23 | #define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */ | ||
24 | #define MAP_NONBLOCK 0x0080 /* do not block on IO */ | ||
25 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ | ||
26 | #define MAP_LOCKED 0x0200 /* pages are locked */ | ||
27 | #define MAP_NORESERVE 0x0400 /* don't check for reservations */ | ||
28 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
29 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
30 | #define MAP_HUGETLB 0x4000 /* create a huge page mapping */ | ||
31 | |||
32 | |||
33 | /* | ||
34 | * Flags for mlockall | ||
35 | */ | ||
36 | #define MCL_CURRENT 1 /* lock all current mappings */ | ||
37 | #define MCL_FUTURE 2 /* lock all future mappings */ | ||
38 | |||
39 | |||
40 | #endif /* _ASM_TILE_MMAN_H */ | ||
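These are the tile-specific values for the standard mmap()/mlockall() flags. A small userspace sketch using a couple of them; the mapping size is arbitrary and MAP_ANONYMOUS comes from the generic headers.

/* Userspace sketch; the mapping size is arbitrary. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 20;
	/* Anonymous, prefaulted mapping so first access avoids a page fault. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	mlockall(MCL_CURRENT);	/* lock all current mappings (may need privilege) */
	munmap(p, len);
	return 0;
}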
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h new file mode 100644 index 00000000000..92f94c77b6e --- /dev/null +++ b/arch/tile/include/asm/mmu.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMU_H | ||
16 | #define _ASM_TILE_MMU_H | ||
17 | |||
18 | /* Capture any arch- and mm-specific information. */ | ||
19 | struct mm_context { | ||
20 | /* | ||
21 | * Written under the mmap_sem semaphore; read without the | ||
22 | * semaphore but atomically, but it is conservatively set. | ||
23 | */ | ||
24 | unsigned int priority_cached; | ||
25 | }; | ||
26 | |||
27 | typedef struct mm_context mm_context_t; | ||
28 | |||
29 | void leave_mm(int cpu); | ||
30 | |||
31 | #endif /* _ASM_TILE_MMU_H */ | ||
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h new file mode 100644 index 00000000000..9bc0d0725c2 --- /dev/null +++ b/arch/tile/include/asm/mmu_context.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMU_CONTEXT_H | ||
16 | #define _ASM_TILE_MMU_CONTEXT_H | ||
17 | |||
18 | #include <linux/smp.h> | ||
19 | #include <asm/setup.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <asm/homecache.h> | ||
25 | #include <asm-generic/mm_hooks.h> | ||
26 | |||
27 | static inline int | ||
28 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | /* Note that arch/tile/kernel/head.S also calls hv_install_context() */ | ||
34 | static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot) | ||
35 | { | ||
36 | /* FIXME: DIRECTIO should not always be set. FIXME. */ | ||
37 | int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO); | ||
38 | if (rc < 0) | ||
39 | panic("hv_install_context failed: %d", rc); | ||
40 | } | ||
41 | |||
42 | static inline void install_page_table(pgd_t *pgdir, int asid) | ||
43 | { | ||
44 | pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir); | ||
45 | __install_page_table(pgdir, asid, *ptep); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * "Lazy" TLB mode is entered when we are switching to a kernel task, | ||
50 | * which borrows the mm of the previous task. The goal of this | ||
51 | * optimization is to avoid having to install a new page table. On | ||
52 | * early x86 machines (where the concept originated) you couldn't do | ||
53 | * anything short of a full page table install for invalidation, so | ||
54 | * handling a remote TLB invalidate required doing a page table | ||
55 | * re-install. Someone clearly decided that it was silly to keep | ||
56 | * doing this while in "lazy" TLB mode, so the optimization involves | ||
57 | * installing the swapper page table instead, the first time one | ||
58 | * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running | ||
59 | * the kernel task doesn't need to take any more interrupts. At that | ||
60 | * point it's then necessary to explicitly reinstall it when context | ||
61 | * switching back to the original mm. | ||
62 | * | ||
63 | * On Tile, we have to do a page-table install whenever DMA is enabled, | ||
64 | * so in that case lazy mode doesn't help anyway. And more generally, | ||
65 | * we have efficient per-page TLB shootdown, and don't expect to spend | ||
66 | * that much time in kernel tasks in general, so just leaving the | ||
67 | * kernel task borrowing the old page table, but handling TLB | ||
68 | * shootdowns, is a reasonable thing to do. And importantly, this | ||
69 | * lets us use the hypervisor's internal APIs for TLB shootdown, which | ||
70 | * means we don't have to worry about having TLB shootdowns blocked | ||
71 | * when Linux is disabling interrupts; see the page migration code for | ||
72 | * an example of where it's important for TLB shootdowns to complete | ||
73 | * even when interrupts are disabled at the Linux level. | ||
74 | */ | ||
75 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t) | ||
76 | { | ||
77 | #if CHIP_HAS_TILE_DMA() | ||
78 | /* | ||
79 | * We have to do an "identity" page table switch in order to | ||
80 | * clear any pending DMA interrupts. | ||
81 | */ | ||
82 | if (current->thread.tile_dma_state.enabled) | ||
83 | install_page_table(mm->pgd, __get_cpu_var(current_asid)); | ||
84 | #endif | ||
85 | } | ||
86 | |||
87 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
88 | struct task_struct *tsk) | ||
89 | { | ||
90 | if (likely(prev != next)) { | ||
91 | |||
92 | int cpu = smp_processor_id(); | ||
93 | |||
94 | /* Pick new ASID. */ | ||
95 | int asid = __get_cpu_var(current_asid) + 1; | ||
96 | if (asid > max_asid) { | ||
97 | asid = min_asid; | ||
98 | local_flush_tlb(); | ||
99 | } | ||
100 | __get_cpu_var(current_asid) = asid; | ||
101 | |||
102 | /* Clear cpu from the old mm, and set it in the new one. */ | ||
103 | cpumask_clear_cpu(cpu, &prev->cpu_vm_mask); | ||
104 | cpumask_set_cpu(cpu, &next->cpu_vm_mask); | ||
105 | |||
106 | /* Re-load page tables */ | ||
107 | install_page_table(next->pgd, asid); | ||
108 | |||
109 | /* See how we should set the red/black cache info */ | ||
110 | check_mm_caching(prev, next); | ||
111 | |||
112 | /* | ||
113 | * Since we're changing to a new mm, we have to flush | ||
114 | * the icache in case some physical page now being mapped | ||
115 | * has subsequently been repurposed and has new code. | ||
116 | */ | ||
117 | __flush_icache(); | ||
118 | |||
119 | } | ||
120 | } | ||
121 | |||
122 | static inline void activate_mm(struct mm_struct *prev_mm, | ||
123 | struct mm_struct *next_mm) | ||
124 | { | ||
125 | switch_mm(prev_mm, next_mm, NULL); | ||
126 | } | ||
127 | |||
128 | #define destroy_context(mm) do { } while (0) | ||
129 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
130 | |||
131 | #endif /* _ASM_TILE_MMU_CONTEXT_H */ | ||
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h new file mode 100644 index 00000000000..c6344c4f32a --- /dev/null +++ b/arch/tile/include/asm/mmzone.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMZONE_H | ||
16 | #define _ASM_TILE_MMZONE_H | ||
17 | |||
18 | extern struct pglist_data node_data[]; | ||
19 | #define NODE_DATA(nid) (&node_data[nid]) | ||
20 | |||
21 | extern void get_memcfg_numa(void); | ||
22 | |||
23 | #ifdef CONFIG_DISCONTIGMEM | ||
24 | |||
25 | #include <asm/page.h> | ||
26 | |||
27 | /* | ||
28 | * Generally, memory ranges are always doled out by the hypervisor in | ||
29 | * fixed-size, power-of-two increments. That would make computing the node | ||
30 | * very easy. We could just take a couple high bits of the PA, which | ||
31 | * denote the memory shim, and we'd be done. However, when we're doing | ||
32 | * memory striping, this may not be true; PAs with different high bit | ||
33 | * values might be in the same node. Thus, we keep a lookup table to | ||
34 | * translate the high bits of the PFN to the node number. | ||
35 | */ | ||
36 | extern int highbits_to_node[]; | ||
37 | |||
38 | static inline int pfn_to_nid(unsigned long pfn) | ||
39 | { | ||
40 | return highbits_to_node[__pfn_to_highbits(pfn)]; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * Following are macros that each NUMA implementation must define. | ||
45 | */ | ||
46 | |||
47 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
48 | #define node_end_pfn(nid) \ | ||
49 | ({ \ | ||
50 | pg_data_t *__pgdat = NODE_DATA(nid); \ | ||
51 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | ||
52 | }) | ||
53 | |||
54 | #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) | ||
55 | |||
56 | static inline int pfn_valid(int pfn) | ||
57 | { | ||
58 | int nid = pfn_to_nid(pfn); | ||
59 | |||
60 | if (nid >= 0) | ||
61 | return (pfn < node_end_pfn(nid)); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | /* Information on the NUMA nodes that we compute early */ | ||
66 | extern unsigned long node_start_pfn[]; | ||
67 | extern unsigned long node_end_pfn[]; | ||
68 | extern unsigned long node_memmap_pfn[]; | ||
69 | extern unsigned long node_percpu_pfn[]; | ||
70 | extern unsigned long node_free_pfn[]; | ||
71 | #ifdef CONFIG_HIGHMEM | ||
72 | extern unsigned long node_lowmem_end_pfn[]; | ||
73 | #endif | ||
74 | #ifdef CONFIG_PCI | ||
75 | extern unsigned long pci_reserve_start_pfn; | ||
76 | extern unsigned long pci_reserve_end_pfn; | ||
77 | #endif | ||
78 | |||
79 | #endif /* CONFIG_DISCONTIGMEM */ | ||
80 | |||
81 | #endif /* _ASM_TILE_MMZONE_H */ | ||
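A hedged, kernel-side sketch of how the node macros above might be consumed; it is purely illustrative (the real consumers live in the tile mm code) and just walks each online node, printing its PFN span via node_start_pfn()/node_end_pfn().

/* Illustrative kernel-side sketch; not part of this header. */
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>

static void example_dump_nodes(void)
{
	int nid;

	for_each_online_node(nid) {
		unsigned long start = node_start_pfn(nid);
		unsigned long end = node_end_pfn(nid);

		pr_info("node %d: pfns [%#lx, %#lx)\n", nid, start, end);
	}
}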
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h new file mode 100644 index 00000000000..1e4b79fe858 --- /dev/null +++ b/arch/tile/include/asm/module.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/module.h> | |||
diff --git a/arch/tile/include/asm/msgbuf.h b/arch/tile/include/asm/msgbuf.h new file mode 100644 index 00000000000..809134c644a --- /dev/null +++ b/arch/tile/include/asm/msgbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/msgbuf.h> | |||
diff --git a/arch/tile/include/asm/mutex.h b/arch/tile/include/asm/mutex.h new file mode 100644 index 00000000000..ff6101aa2c7 --- /dev/null +++ b/arch/tile/include/asm/mutex.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/mutex-dec.h> | |||
diff --git a/arch/tile/include/asm/opcode-tile.h b/arch/tile/include/asm/opcode-tile.h new file mode 100644 index 00000000000..ba38959137d --- /dev/null +++ b/arch/tile/include/asm/opcode-tile.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_OPCODE_TILE_H | ||
16 | #define _ASM_TILE_OPCODE_TILE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | #if CHIP_WORD_SIZE() == 64 | ||
21 | #include <asm/opcode-tile_64.h> | ||
22 | #else | ||
23 | #include <asm/opcode-tile_32.h> | ||
24 | #endif | ||
25 | |||
26 | /* These definitions are not correct for TILE64, so just avoid them. */ | ||
27 | #undef TILE_ELF_MACHINE_CODE | ||
28 | #undef TILE_ELF_NAME | ||
29 | |||
30 | #endif /* _ASM_TILE_OPCODE_TILE_H */ | ||
diff --git a/arch/tile/include/asm/opcode-tile_32.h b/arch/tile/include/asm/opcode-tile_32.h new file mode 100644 index 00000000000..eda60ecbae3 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_32.h | |||
@@ -0,0 +1,1506 @@ | |||
1 | /* tile.h -- Header file for TILE opcode table | ||
2 | Copyright (C) 2005 Free Software Foundation, Inc. | ||
3 | Contributed by Tilera Corp. */ | ||
4 | |||
5 | #ifndef opcode_tile_h | ||
6 | #define opcode_tile_h | ||
7 | |||
8 | typedef unsigned long long tile_bundle_bits; | ||
9 | |||
10 | |||
11 | enum | ||
12 | { | ||
13 | TILE_MAX_OPERANDS = 5 /* mm */ | ||
14 | }; | ||
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_RAISE, | ||
32 | TILE_OPC_ADD, | ||
33 | TILE_OPC_ADD_SN, | ||
34 | TILE_OPC_ADDB, | ||
35 | TILE_OPC_ADDB_SN, | ||
36 | TILE_OPC_ADDBS_U, | ||
37 | TILE_OPC_ADDBS_U_SN, | ||
38 | TILE_OPC_ADDH, | ||
39 | TILE_OPC_ADDH_SN, | ||
40 | TILE_OPC_ADDHS, | ||
41 | TILE_OPC_ADDHS_SN, | ||
42 | TILE_OPC_ADDI, | ||
43 | TILE_OPC_ADDI_SN, | ||
44 | TILE_OPC_ADDIB, | ||
45 | TILE_OPC_ADDIB_SN, | ||
46 | TILE_OPC_ADDIH, | ||
47 | TILE_OPC_ADDIH_SN, | ||
48 | TILE_OPC_ADDLI, | ||
49 | TILE_OPC_ADDLI_SN, | ||
50 | TILE_OPC_ADDLIS, | ||
51 | TILE_OPC_ADDS, | ||
52 | TILE_OPC_ADDS_SN, | ||
53 | TILE_OPC_ADIFFB_U, | ||
54 | TILE_OPC_ADIFFB_U_SN, | ||
55 | TILE_OPC_ADIFFH, | ||
56 | TILE_OPC_ADIFFH_SN, | ||
57 | TILE_OPC_AND, | ||
58 | TILE_OPC_AND_SN, | ||
59 | TILE_OPC_ANDI, | ||
60 | TILE_OPC_ANDI_SN, | ||
61 | TILE_OPC_AULI, | ||
62 | TILE_OPC_AVGB_U, | ||
63 | TILE_OPC_AVGB_U_SN, | ||
64 | TILE_OPC_AVGH, | ||
65 | TILE_OPC_AVGH_SN, | ||
66 | TILE_OPC_BBNS, | ||
67 | TILE_OPC_BBNS_SN, | ||
68 | TILE_OPC_BBNST, | ||
69 | TILE_OPC_BBNST_SN, | ||
70 | TILE_OPC_BBS, | ||
71 | TILE_OPC_BBS_SN, | ||
72 | TILE_OPC_BBST, | ||
73 | TILE_OPC_BBST_SN, | ||
74 | TILE_OPC_BGEZ, | ||
75 | TILE_OPC_BGEZ_SN, | ||
76 | TILE_OPC_BGEZT, | ||
77 | TILE_OPC_BGEZT_SN, | ||
78 | TILE_OPC_BGZ, | ||
79 | TILE_OPC_BGZ_SN, | ||
80 | TILE_OPC_BGZT, | ||
81 | TILE_OPC_BGZT_SN, | ||
82 | TILE_OPC_BITX, | ||
83 | TILE_OPC_BITX_SN, | ||
84 | TILE_OPC_BLEZ, | ||
85 | TILE_OPC_BLEZ_SN, | ||
86 | TILE_OPC_BLEZT, | ||
87 | TILE_OPC_BLEZT_SN, | ||
88 | TILE_OPC_BLZ, | ||
89 | TILE_OPC_BLZ_SN, | ||
90 | TILE_OPC_BLZT, | ||
91 | TILE_OPC_BLZT_SN, | ||
92 | TILE_OPC_BNZ, | ||
93 | TILE_OPC_BNZ_SN, | ||
94 | TILE_OPC_BNZT, | ||
95 | TILE_OPC_BNZT_SN, | ||
96 | TILE_OPC_BYTEX, | ||
97 | TILE_OPC_BYTEX_SN, | ||
98 | TILE_OPC_BZ, | ||
99 | TILE_OPC_BZ_SN, | ||
100 | TILE_OPC_BZT, | ||
101 | TILE_OPC_BZT_SN, | ||
102 | TILE_OPC_CLZ, | ||
103 | TILE_OPC_CLZ_SN, | ||
104 | TILE_OPC_CRC32_32, | ||
105 | TILE_OPC_CRC32_32_SN, | ||
106 | TILE_OPC_CRC32_8, | ||
107 | TILE_OPC_CRC32_8_SN, | ||
108 | TILE_OPC_CTZ, | ||
109 | TILE_OPC_CTZ_SN, | ||
110 | TILE_OPC_DRAIN, | ||
111 | TILE_OPC_DTLBPR, | ||
112 | TILE_OPC_DWORD_ALIGN, | ||
113 | TILE_OPC_DWORD_ALIGN_SN, | ||
114 | TILE_OPC_FINV, | ||
115 | TILE_OPC_FLUSH, | ||
116 | TILE_OPC_FNOP, | ||
117 | TILE_OPC_ICOH, | ||
118 | TILE_OPC_ILL, | ||
119 | TILE_OPC_INTHB, | ||
120 | TILE_OPC_INTHB_SN, | ||
121 | TILE_OPC_INTHH, | ||
122 | TILE_OPC_INTHH_SN, | ||
123 | TILE_OPC_INTLB, | ||
124 | TILE_OPC_INTLB_SN, | ||
125 | TILE_OPC_INTLH, | ||
126 | TILE_OPC_INTLH_SN, | ||
127 | TILE_OPC_INV, | ||
128 | TILE_OPC_IRET, | ||
129 | TILE_OPC_JALB, | ||
130 | TILE_OPC_JALF, | ||
131 | TILE_OPC_JALR, | ||
132 | TILE_OPC_JALRP, | ||
133 | TILE_OPC_JB, | ||
134 | TILE_OPC_JF, | ||
135 | TILE_OPC_JR, | ||
136 | TILE_OPC_JRP, | ||
137 | TILE_OPC_LB, | ||
138 | TILE_OPC_LB_SN, | ||
139 | TILE_OPC_LB_U, | ||
140 | TILE_OPC_LB_U_SN, | ||
141 | TILE_OPC_LBADD, | ||
142 | TILE_OPC_LBADD_SN, | ||
143 | TILE_OPC_LBADD_U, | ||
144 | TILE_OPC_LBADD_U_SN, | ||
145 | TILE_OPC_LH, | ||
146 | TILE_OPC_LH_SN, | ||
147 | TILE_OPC_LH_U, | ||
148 | TILE_OPC_LH_U_SN, | ||
149 | TILE_OPC_LHADD, | ||
150 | TILE_OPC_LHADD_SN, | ||
151 | TILE_OPC_LHADD_U, | ||
152 | TILE_OPC_LHADD_U_SN, | ||
153 | TILE_OPC_LNK, | ||
154 | TILE_OPC_LNK_SN, | ||
155 | TILE_OPC_LW, | ||
156 | TILE_OPC_LW_SN, | ||
157 | TILE_OPC_LW_NA, | ||
158 | TILE_OPC_LW_NA_SN, | ||
159 | TILE_OPC_LWADD, | ||
160 | TILE_OPC_LWADD_SN, | ||
161 | TILE_OPC_LWADD_NA, | ||
162 | TILE_OPC_LWADD_NA_SN, | ||
163 | TILE_OPC_MAXB_U, | ||
164 | TILE_OPC_MAXB_U_SN, | ||
165 | TILE_OPC_MAXH, | ||
166 | TILE_OPC_MAXH_SN, | ||
167 | TILE_OPC_MAXIB_U, | ||
168 | TILE_OPC_MAXIB_U_SN, | ||
169 | TILE_OPC_MAXIH, | ||
170 | TILE_OPC_MAXIH_SN, | ||
171 | TILE_OPC_MF, | ||
172 | TILE_OPC_MFSPR, | ||
173 | TILE_OPC_MINB_U, | ||
174 | TILE_OPC_MINB_U_SN, | ||
175 | TILE_OPC_MINH, | ||
176 | TILE_OPC_MINH_SN, | ||
177 | TILE_OPC_MINIB_U, | ||
178 | TILE_OPC_MINIB_U_SN, | ||
179 | TILE_OPC_MINIH, | ||
180 | TILE_OPC_MINIH_SN, | ||
181 | TILE_OPC_MM, | ||
182 | TILE_OPC_MNZ, | ||
183 | TILE_OPC_MNZ_SN, | ||
184 | TILE_OPC_MNZB, | ||
185 | TILE_OPC_MNZB_SN, | ||
186 | TILE_OPC_MNZH, | ||
187 | TILE_OPC_MNZH_SN, | ||
188 | TILE_OPC_MTSPR, | ||
189 | TILE_OPC_MULHH_SS, | ||
190 | TILE_OPC_MULHH_SS_SN, | ||
191 | TILE_OPC_MULHH_SU, | ||
192 | TILE_OPC_MULHH_SU_SN, | ||
193 | TILE_OPC_MULHH_UU, | ||
194 | TILE_OPC_MULHH_UU_SN, | ||
195 | TILE_OPC_MULHHA_SS, | ||
196 | TILE_OPC_MULHHA_SS_SN, | ||
197 | TILE_OPC_MULHHA_SU, | ||
198 | TILE_OPC_MULHHA_SU_SN, | ||
199 | TILE_OPC_MULHHA_UU, | ||
200 | TILE_OPC_MULHHA_UU_SN, | ||
201 | TILE_OPC_MULHHSA_UU, | ||
202 | TILE_OPC_MULHHSA_UU_SN, | ||
203 | TILE_OPC_MULHL_SS, | ||
204 | TILE_OPC_MULHL_SS_SN, | ||
205 | TILE_OPC_MULHL_SU, | ||
206 | TILE_OPC_MULHL_SU_SN, | ||
207 | TILE_OPC_MULHL_US, | ||
208 | TILE_OPC_MULHL_US_SN, | ||
209 | TILE_OPC_MULHL_UU, | ||
210 | TILE_OPC_MULHL_UU_SN, | ||
211 | TILE_OPC_MULHLA_SS, | ||
212 | TILE_OPC_MULHLA_SS_SN, | ||
213 | TILE_OPC_MULHLA_SU, | ||
214 | TILE_OPC_MULHLA_SU_SN, | ||
215 | TILE_OPC_MULHLA_US, | ||
216 | TILE_OPC_MULHLA_US_SN, | ||
217 | TILE_OPC_MULHLA_UU, | ||
218 | TILE_OPC_MULHLA_UU_SN, | ||
219 | TILE_OPC_MULHLSA_UU, | ||
220 | TILE_OPC_MULHLSA_UU_SN, | ||
221 | TILE_OPC_MULLL_SS, | ||
222 | TILE_OPC_MULLL_SS_SN, | ||
223 | TILE_OPC_MULLL_SU, | ||
224 | TILE_OPC_MULLL_SU_SN, | ||
225 | TILE_OPC_MULLL_UU, | ||
226 | TILE_OPC_MULLL_UU_SN, | ||
227 | TILE_OPC_MULLLA_SS, | ||
228 | TILE_OPC_MULLLA_SS_SN, | ||
229 | TILE_OPC_MULLLA_SU, | ||
230 | TILE_OPC_MULLLA_SU_SN, | ||
231 | TILE_OPC_MULLLA_UU, | ||
232 | TILE_OPC_MULLLA_UU_SN, | ||
233 | TILE_OPC_MULLLSA_UU, | ||
234 | TILE_OPC_MULLLSA_UU_SN, | ||
235 | TILE_OPC_MVNZ, | ||
236 | TILE_OPC_MVNZ_SN, | ||
237 | TILE_OPC_MVZ, | ||
238 | TILE_OPC_MVZ_SN, | ||
239 | TILE_OPC_MZ, | ||
240 | TILE_OPC_MZ_SN, | ||
241 | TILE_OPC_MZB, | ||
242 | TILE_OPC_MZB_SN, | ||
243 | TILE_OPC_MZH, | ||
244 | TILE_OPC_MZH_SN, | ||
245 | TILE_OPC_NAP, | ||
246 | TILE_OPC_NOP, | ||
247 | TILE_OPC_NOR, | ||
248 | TILE_OPC_NOR_SN, | ||
249 | TILE_OPC_OR, | ||
250 | TILE_OPC_OR_SN, | ||
251 | TILE_OPC_ORI, | ||
252 | TILE_OPC_ORI_SN, | ||
253 | TILE_OPC_PACKBS_U, | ||
254 | TILE_OPC_PACKBS_U_SN, | ||
255 | TILE_OPC_PACKHB, | ||
256 | TILE_OPC_PACKHB_SN, | ||
257 | TILE_OPC_PACKHS, | ||
258 | TILE_OPC_PACKHS_SN, | ||
259 | TILE_OPC_PACKLB, | ||
260 | TILE_OPC_PACKLB_SN, | ||
261 | TILE_OPC_PCNT, | ||
262 | TILE_OPC_PCNT_SN, | ||
263 | TILE_OPC_RL, | ||
264 | TILE_OPC_RL_SN, | ||
265 | TILE_OPC_RLI, | ||
266 | TILE_OPC_RLI_SN, | ||
267 | TILE_OPC_S1A, | ||
268 | TILE_OPC_S1A_SN, | ||
269 | TILE_OPC_S2A, | ||
270 | TILE_OPC_S2A_SN, | ||
271 | TILE_OPC_S3A, | ||
272 | TILE_OPC_S3A_SN, | ||
273 | TILE_OPC_SADAB_U, | ||
274 | TILE_OPC_SADAB_U_SN, | ||
275 | TILE_OPC_SADAH, | ||
276 | TILE_OPC_SADAH_SN, | ||
277 | TILE_OPC_SADAH_U, | ||
278 | TILE_OPC_SADAH_U_SN, | ||
279 | TILE_OPC_SADB_U, | ||
280 | TILE_OPC_SADB_U_SN, | ||
281 | TILE_OPC_SADH, | ||
282 | TILE_OPC_SADH_SN, | ||
283 | TILE_OPC_SADH_U, | ||
284 | TILE_OPC_SADH_U_SN, | ||
285 | TILE_OPC_SB, | ||
286 | TILE_OPC_SBADD, | ||
287 | TILE_OPC_SEQ, | ||
288 | TILE_OPC_SEQ_SN, | ||
289 | TILE_OPC_SEQB, | ||
290 | TILE_OPC_SEQB_SN, | ||
291 | TILE_OPC_SEQH, | ||
292 | TILE_OPC_SEQH_SN, | ||
293 | TILE_OPC_SEQI, | ||
294 | TILE_OPC_SEQI_SN, | ||
295 | TILE_OPC_SEQIB, | ||
296 | TILE_OPC_SEQIB_SN, | ||
297 | TILE_OPC_SEQIH, | ||
298 | TILE_OPC_SEQIH_SN, | ||
299 | TILE_OPC_SH, | ||
300 | TILE_OPC_SHADD, | ||
301 | TILE_OPC_SHL, | ||
302 | TILE_OPC_SHL_SN, | ||
303 | TILE_OPC_SHLB, | ||
304 | TILE_OPC_SHLB_SN, | ||
305 | TILE_OPC_SHLH, | ||
306 | TILE_OPC_SHLH_SN, | ||
307 | TILE_OPC_SHLI, | ||
308 | TILE_OPC_SHLI_SN, | ||
309 | TILE_OPC_SHLIB, | ||
310 | TILE_OPC_SHLIB_SN, | ||
311 | TILE_OPC_SHLIH, | ||
312 | TILE_OPC_SHLIH_SN, | ||
313 | TILE_OPC_SHR, | ||
314 | TILE_OPC_SHR_SN, | ||
315 | TILE_OPC_SHRB, | ||
316 | TILE_OPC_SHRB_SN, | ||
317 | TILE_OPC_SHRH, | ||
318 | TILE_OPC_SHRH_SN, | ||
319 | TILE_OPC_SHRI, | ||
320 | TILE_OPC_SHRI_SN, | ||
321 | TILE_OPC_SHRIB, | ||
322 | TILE_OPC_SHRIB_SN, | ||
323 | TILE_OPC_SHRIH, | ||
324 | TILE_OPC_SHRIH_SN, | ||
325 | TILE_OPC_SLT, | ||
326 | TILE_OPC_SLT_SN, | ||
327 | TILE_OPC_SLT_U, | ||
328 | TILE_OPC_SLT_U_SN, | ||
329 | TILE_OPC_SLTB, | ||
330 | TILE_OPC_SLTB_SN, | ||
331 | TILE_OPC_SLTB_U, | ||
332 | TILE_OPC_SLTB_U_SN, | ||
333 | TILE_OPC_SLTE, | ||
334 | TILE_OPC_SLTE_SN, | ||
335 | TILE_OPC_SLTE_U, | ||
336 | TILE_OPC_SLTE_U_SN, | ||
337 | TILE_OPC_SLTEB, | ||
338 | TILE_OPC_SLTEB_SN, | ||
339 | TILE_OPC_SLTEB_U, | ||
340 | TILE_OPC_SLTEB_U_SN, | ||
341 | TILE_OPC_SLTEH, | ||
342 | TILE_OPC_SLTEH_SN, | ||
343 | TILE_OPC_SLTEH_U, | ||
344 | TILE_OPC_SLTEH_U_SN, | ||
345 | TILE_OPC_SLTH, | ||
346 | TILE_OPC_SLTH_SN, | ||
347 | TILE_OPC_SLTH_U, | ||
348 | TILE_OPC_SLTH_U_SN, | ||
349 | TILE_OPC_SLTI, | ||
350 | TILE_OPC_SLTI_SN, | ||
351 | TILE_OPC_SLTI_U, | ||
352 | TILE_OPC_SLTI_U_SN, | ||
353 | TILE_OPC_SLTIB, | ||
354 | TILE_OPC_SLTIB_SN, | ||
355 | TILE_OPC_SLTIB_U, | ||
356 | TILE_OPC_SLTIB_U_SN, | ||
357 | TILE_OPC_SLTIH, | ||
358 | TILE_OPC_SLTIH_SN, | ||
359 | TILE_OPC_SLTIH_U, | ||
360 | TILE_OPC_SLTIH_U_SN, | ||
361 | TILE_OPC_SNE, | ||
362 | TILE_OPC_SNE_SN, | ||
363 | TILE_OPC_SNEB, | ||
364 | TILE_OPC_SNEB_SN, | ||
365 | TILE_OPC_SNEH, | ||
366 | TILE_OPC_SNEH_SN, | ||
367 | TILE_OPC_SRA, | ||
368 | TILE_OPC_SRA_SN, | ||
369 | TILE_OPC_SRAB, | ||
370 | TILE_OPC_SRAB_SN, | ||
371 | TILE_OPC_SRAH, | ||
372 | TILE_OPC_SRAH_SN, | ||
373 | TILE_OPC_SRAI, | ||
374 | TILE_OPC_SRAI_SN, | ||
375 | TILE_OPC_SRAIB, | ||
376 | TILE_OPC_SRAIB_SN, | ||
377 | TILE_OPC_SRAIH, | ||
378 | TILE_OPC_SRAIH_SN, | ||
379 | TILE_OPC_SUB, | ||
380 | TILE_OPC_SUB_SN, | ||
381 | TILE_OPC_SUBB, | ||
382 | TILE_OPC_SUBB_SN, | ||
383 | TILE_OPC_SUBBS_U, | ||
384 | TILE_OPC_SUBBS_U_SN, | ||
385 | TILE_OPC_SUBH, | ||
386 | TILE_OPC_SUBH_SN, | ||
387 | TILE_OPC_SUBHS, | ||
388 | TILE_OPC_SUBHS_SN, | ||
389 | TILE_OPC_SUBS, | ||
390 | TILE_OPC_SUBS_SN, | ||
391 | TILE_OPC_SW, | ||
392 | TILE_OPC_SWADD, | ||
393 | TILE_OPC_SWINT0, | ||
394 | TILE_OPC_SWINT1, | ||
395 | TILE_OPC_SWINT2, | ||
396 | TILE_OPC_SWINT3, | ||
397 | TILE_OPC_TBLIDXB0, | ||
398 | TILE_OPC_TBLIDXB0_SN, | ||
399 | TILE_OPC_TBLIDXB1, | ||
400 | TILE_OPC_TBLIDXB1_SN, | ||
401 | TILE_OPC_TBLIDXB2, | ||
402 | TILE_OPC_TBLIDXB2_SN, | ||
403 | TILE_OPC_TBLIDXB3, | ||
404 | TILE_OPC_TBLIDXB3_SN, | ||
405 | TILE_OPC_TNS, | ||
406 | TILE_OPC_TNS_SN, | ||
407 | TILE_OPC_WH64, | ||
408 | TILE_OPC_XOR, | ||
409 | TILE_OPC_XOR_SN, | ||
410 | TILE_OPC_XORI, | ||
411 | TILE_OPC_XORI_SN, | ||
412 | TILE_OPC_NONE | ||
413 | } tile_mnemonic; | ||
414 | |||
415 | /* 64-bit pattern for a { bpt ; nop } bundle. */ | ||
416 | #define TILE_BPT_BUNDLE 0x400b3cae70166000ULL | ||
417 | |||
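As a hedged aside: TILE_BPT_BUNDLE is the raw 8-byte image of a { bpt ; nop } bundle, so a debugger-style consumer could recognize or plant it with a plain memory copy. The sketch below is illustrative only (the icache-flush responsibility and the memcpy-based patching are assumptions of this example, not the kernel's actual breakpoint machinery).

    #include <string.h>

    /* Sketch: recognize / plant the { bpt ; nop } breakpoint bundle.
     * Assumes native (little-endian TILE) byte order at addr. */
    static int is_breakpoint_bundle(const void *addr)
    {
            tile_bundle_bits b;
            memcpy(&b, addr, sizeof(b));          /* bundles are 8 bytes */
            return b == TILE_BPT_BUNDLE;
    }

    static void plant_breakpoint_bundle(void *addr)
    {
            tile_bundle_bits b = TILE_BPT_BUNDLE;
            memcpy(addr, &b, sizeof(b));          /* caller must flush icache */
    }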
418 | |||
419 | #define TILE_ELF_MACHINE_CODE EM_TILEPRO | ||
420 | |||
421 | #define TILE_ELF_NAME "elf32-tilepro" | ||
422 | |||
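A minimal sketch of how these two constants might be used when validating a TILEPro object file; it assumes the ELF headers in use already define EM_TILEPRO and is not code from this tree.

    #include <elf.h>

    /* Sketch: accept only 32-bit TILEPro ELF images. */
    static int looks_like_tilepro_elf(const Elf32_Ehdr *eh)
    {
            return eh->e_machine == TILE_ELF_MACHINE_CODE &&   /* EM_TILEPRO */
                   eh->e_ident[EI_CLASS] == ELFCLASS32;
    }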
423 | |||
424 | static __inline unsigned int | ||
425 | get_BrOff_SN(tile_bundle_bits num) | ||
426 | { | ||
427 | const unsigned int n = (unsigned int)num; | ||
428 | return (((n >> 0)) & 0x3ff); | ||
429 | } | ||
430 | |||
431 | static __inline unsigned int | ||
432 | get_BrOff_X1(tile_bundle_bits n) | ||
433 | { | ||
434 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
435 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
436 | } | ||
437 | |||
438 | static __inline unsigned int | ||
439 | get_BrType_X1(tile_bundle_bits n) | ||
440 | { | ||
441 | return (((unsigned int)(n >> 31)) & 0xf); | ||
442 | } | ||
443 | |||
444 | static __inline unsigned int | ||
445 | get_Dest_Imm8_X1(tile_bundle_bits n) | ||
446 | { | ||
447 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
448 | (((unsigned int)(n >> 43)) & 0x000000c0); | ||
449 | } | ||
450 | |||
451 | static __inline unsigned int | ||
452 | get_Dest_SN(tile_bundle_bits num) | ||
453 | { | ||
454 | const unsigned int n = (unsigned int)num; | ||
455 | return (((n >> 2)) & 0x3); | ||
456 | } | ||
457 | |||
458 | static __inline unsigned int | ||
459 | get_Dest_X0(tile_bundle_bits num) | ||
460 | { | ||
461 | const unsigned int n = (unsigned int)num; | ||
462 | return (((n >> 0)) & 0x3f); | ||
463 | } | ||
464 | |||
465 | static __inline unsigned int | ||
466 | get_Dest_X1(tile_bundle_bits n) | ||
467 | { | ||
468 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
469 | } | ||
470 | |||
471 | static __inline unsigned int | ||
472 | get_Dest_Y0(tile_bundle_bits num) | ||
473 | { | ||
474 | const unsigned int n = (unsigned int)num; | ||
475 | return (((n >> 0)) & 0x3f); | ||
476 | } | ||
477 | |||
478 | static __inline unsigned int | ||
479 | get_Dest_Y1(tile_bundle_bits n) | ||
480 | { | ||
481 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
482 | } | ||
483 | |||
484 | static __inline unsigned int | ||
485 | get_Imm16_X0(tile_bundle_bits num) | ||
486 | { | ||
487 | const unsigned int n = (unsigned int)num; | ||
488 | return (((n >> 12)) & 0xffff); | ||
489 | } | ||
490 | |||
491 | static __inline unsigned int | ||
492 | get_Imm16_X1(tile_bundle_bits n) | ||
493 | { | ||
494 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
495 | } | ||
496 | |||
497 | static __inline unsigned int | ||
498 | get_Imm8_SN(tile_bundle_bits num) | ||
499 | { | ||
500 | const unsigned int n = (unsigned int)num; | ||
501 | return (((n >> 0)) & 0xff); | ||
502 | } | ||
503 | |||
504 | static __inline unsigned int | ||
505 | get_Imm8_X0(tile_bundle_bits num) | ||
506 | { | ||
507 | const unsigned int n = (unsigned int)num; | ||
508 | return (((n >> 12)) & 0xff); | ||
509 | } | ||
510 | |||
511 | static __inline unsigned int | ||
512 | get_Imm8_X1(tile_bundle_bits n) | ||
513 | { | ||
514 | return (((unsigned int)(n >> 43)) & 0xff); | ||
515 | } | ||
516 | |||
517 | static __inline unsigned int | ||
518 | get_Imm8_Y0(tile_bundle_bits num) | ||
519 | { | ||
520 | const unsigned int n = (unsigned int)num; | ||
521 | return (((n >> 12)) & 0xff); | ||
522 | } | ||
523 | |||
524 | static __inline unsigned int | ||
525 | get_Imm8_Y1(tile_bundle_bits n) | ||
526 | { | ||
527 | return (((unsigned int)(n >> 43)) & 0xff); | ||
528 | } | ||
529 | |||
530 | static __inline unsigned int | ||
531 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
532 | { | ||
533 | const unsigned int n = (unsigned int)num; | ||
534 | return (((n >> 20)) & 0x7f); | ||
535 | } | ||
536 | |||
537 | static __inline unsigned int | ||
538 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
539 | { | ||
540 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
541 | } | ||
542 | |||
543 | static __inline unsigned int | ||
544 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
545 | { | ||
546 | const unsigned int n = (unsigned int)num; | ||
547 | return (((n >> 8)) & 0x3); | ||
548 | } | ||
549 | |||
550 | static __inline unsigned int | ||
551 | get_JOffLong_X1(tile_bundle_bits n) | ||
552 | { | ||
553 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
554 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
555 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
556 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
557 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_JOff_X1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
564 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
565 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
566 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
567 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
568 | } | ||
569 | |||
570 | static __inline unsigned int | ||
571 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
572 | { | ||
573 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
574 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
575 | } | ||
576 | |||
577 | static __inline unsigned int | ||
578 | get_MMEnd_X0(tile_bundle_bits num) | ||
579 | { | ||
580 | const unsigned int n = (unsigned int)num; | ||
581 | return (((n >> 18)) & 0x1f); | ||
582 | } | ||
583 | |||
584 | static __inline unsigned int | ||
585 | get_MMEnd_X1(tile_bundle_bits n) | ||
586 | { | ||
587 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
588 | } | ||
589 | |||
590 | static __inline unsigned int | ||
591 | get_MMStart_X0(tile_bundle_bits num) | ||
592 | { | ||
593 | const unsigned int n = (unsigned int)num; | ||
594 | return (((n >> 23)) & 0x1f); | ||
595 | } | ||
596 | |||
597 | static __inline unsigned int | ||
598 | get_MMStart_X1(tile_bundle_bits n) | ||
599 | { | ||
600 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
601 | } | ||
602 | |||
603 | static __inline unsigned int | ||
604 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
605 | { | ||
606 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
607 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
608 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
609 | } | ||
610 | |||
611 | static __inline unsigned int | ||
612 | get_Mode(tile_bundle_bits n) | ||
613 | { | ||
614 | return (((unsigned int)(n >> 63)) & 0x1); | ||
615 | } | ||
616 | |||
617 | static __inline unsigned int | ||
618 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
619 | { | ||
620 | const unsigned int n = (unsigned int)num; | ||
621 | return (((n >> 0)) & 0xf); | ||
622 | } | ||
623 | |||
624 | static __inline unsigned int | ||
625 | get_Opcode_SN(tile_bundle_bits num) | ||
626 | { | ||
627 | const unsigned int n = (unsigned int)num; | ||
628 | return (((n >> 10)) & 0x3f); | ||
629 | } | ||
630 | |||
631 | static __inline unsigned int | ||
632 | get_Opcode_X0(tile_bundle_bits num) | ||
633 | { | ||
634 | const unsigned int n = (unsigned int)num; | ||
635 | return (((n >> 28)) & 0x7); | ||
636 | } | ||
637 | |||
638 | static __inline unsigned int | ||
639 | get_Opcode_X1(tile_bundle_bits n) | ||
640 | { | ||
641 | return (((unsigned int)(n >> 59)) & 0xf); | ||
642 | } | ||
643 | |||
644 | static __inline unsigned int | ||
645 | get_Opcode_Y0(tile_bundle_bits num) | ||
646 | { | ||
647 | const unsigned int n = (unsigned int)num; | ||
648 | return (((n >> 27)) & 0xf); | ||
649 | } | ||
650 | |||
651 | static __inline unsigned int | ||
652 | get_Opcode_Y1(tile_bundle_bits n) | ||
653 | { | ||
654 | return (((unsigned int)(n >> 59)) & 0xf); | ||
655 | } | ||
656 | |||
657 | static __inline unsigned int | ||
658 | get_Opcode_Y2(tile_bundle_bits n) | ||
659 | { | ||
660 | return (((unsigned int)(n >> 56)) & 0x7); | ||
661 | } | ||
662 | |||
663 | static __inline unsigned int | ||
664 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
665 | { | ||
666 | const unsigned int n = (unsigned int)num; | ||
667 | return (((n >> 4)) & 0xf); | ||
668 | } | ||
669 | |||
670 | static __inline unsigned int | ||
671 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
672 | { | ||
673 | const unsigned int n = (unsigned int)num; | ||
674 | return (((n >> 18)) & 0x1ff); | ||
675 | } | ||
676 | |||
677 | static __inline unsigned int | ||
678 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
679 | { | ||
680 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
681 | } | ||
682 | |||
683 | static __inline unsigned int | ||
684 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
685 | { | ||
686 | const unsigned int n = (unsigned int)num; | ||
687 | return (((n >> 18)) & 0x3); | ||
688 | } | ||
689 | |||
690 | static __inline unsigned int | ||
691 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
692 | { | ||
693 | return (((unsigned int)(n >> 49)) & 0x3); | ||
694 | } | ||
695 | |||
696 | static __inline unsigned int | ||
697 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
698 | { | ||
699 | const unsigned int n = (unsigned int)num; | ||
700 | return (((n >> 0)) & 0x3ff); | ||
701 | } | ||
702 | |||
703 | static __inline unsigned int | ||
704 | get_S_X0(tile_bundle_bits num) | ||
705 | { | ||
706 | const unsigned int n = (unsigned int)num; | ||
707 | return (((n >> 27)) & 0x1); | ||
708 | } | ||
709 | |||
710 | static __inline unsigned int | ||
711 | get_S_X1(tile_bundle_bits n) | ||
712 | { | ||
713 | return (((unsigned int)(n >> 58)) & 0x1); | ||
714 | } | ||
715 | |||
716 | static __inline unsigned int | ||
717 | get_ShAmt_X0(tile_bundle_bits num) | ||
718 | { | ||
719 | const unsigned int n = (unsigned int)num; | ||
720 | return (((n >> 12)) & 0x1f); | ||
721 | } | ||
722 | |||
723 | static __inline unsigned int | ||
724 | get_ShAmt_X1(tile_bundle_bits n) | ||
725 | { | ||
726 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
727 | } | ||
728 | |||
729 | static __inline unsigned int | ||
730 | get_ShAmt_Y0(tile_bundle_bits num) | ||
731 | { | ||
732 | const unsigned int n = (unsigned int)num; | ||
733 | return (((n >> 12)) & 0x1f); | ||
734 | } | ||
735 | |||
736 | static __inline unsigned int | ||
737 | get_ShAmt_Y1(tile_bundle_bits n) | ||
738 | { | ||
739 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
740 | } | ||
741 | |||
742 | static __inline unsigned int | ||
743 | get_SrcA_X0(tile_bundle_bits num) | ||
744 | { | ||
745 | const unsigned int n = (unsigned int)num; | ||
746 | return (((n >> 6)) & 0x3f); | ||
747 | } | ||
748 | |||
749 | static __inline unsigned int | ||
750 | get_SrcA_X1(tile_bundle_bits n) | ||
751 | { | ||
752 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
753 | } | ||
754 | |||
755 | static __inline unsigned int | ||
756 | get_SrcA_Y0(tile_bundle_bits num) | ||
757 | { | ||
758 | const unsigned int n = (unsigned int)num; | ||
759 | return (((n >> 6)) & 0x3f); | ||
760 | } | ||
761 | |||
762 | static __inline unsigned int | ||
763 | get_SrcA_Y1(tile_bundle_bits n) | ||
764 | { | ||
765 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
766 | } | ||
767 | |||
768 | static __inline unsigned int | ||
769 | get_SrcA_Y2(tile_bundle_bits n) | ||
770 | { | ||
771 | return (((n >> 26)) & 0x00000001) | | ||
772 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
773 | } | ||
774 | |||
775 | static __inline unsigned int | ||
776 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
777 | { | ||
778 | const unsigned int n = (unsigned int)num; | ||
779 | return (((n >> 20)) & 0x3f); | ||
780 | } | ||
781 | |||
782 | static __inline unsigned int | ||
783 | get_SrcB_X0(tile_bundle_bits num) | ||
784 | { | ||
785 | const unsigned int n = (unsigned int)num; | ||
786 | return (((n >> 12)) & 0x3f); | ||
787 | } | ||
788 | |||
789 | static __inline unsigned int | ||
790 | get_SrcB_X1(tile_bundle_bits n) | ||
791 | { | ||
792 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
793 | } | ||
794 | |||
795 | static __inline unsigned int | ||
796 | get_SrcB_Y0(tile_bundle_bits num) | ||
797 | { | ||
798 | const unsigned int n = (unsigned int)num; | ||
799 | return (((n >> 12)) & 0x3f); | ||
800 | } | ||
801 | |||
802 | static __inline unsigned int | ||
803 | get_SrcB_Y1(tile_bundle_bits n) | ||
804 | { | ||
805 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
806 | } | ||
807 | |||
808 | static __inline unsigned int | ||
809 | get_Src_SN(tile_bundle_bits num) | ||
810 | { | ||
811 | const unsigned int n = (unsigned int)num; | ||
812 | return (((n >> 0)) & 0x3); | ||
813 | } | ||
814 | |||
815 | static __inline unsigned int | ||
816 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
817 | { | ||
818 | const unsigned int n = (unsigned int)num; | ||
819 | return (((n >> 12)) & 0x1f); | ||
820 | } | ||
821 | |||
822 | static __inline unsigned int | ||
823 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
824 | { | ||
825 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
826 | } | ||
827 | |||
828 | static __inline unsigned int | ||
829 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
830 | { | ||
831 | const unsigned int n = (unsigned int)num; | ||
832 | return (((n >> 12)) & 0x1f); | ||
833 | } | ||
834 | |||
835 | static __inline unsigned int | ||
836 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
837 | { | ||
838 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
839 | } | ||
840 | |||
841 | static __inline unsigned int | ||
842 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
843 | { | ||
844 | const unsigned int n = (unsigned int)num; | ||
845 | return (((n >> 17)) & 0x3ff); | ||
846 | } | ||
847 | |||
848 | static __inline unsigned int | ||
849 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
850 | { | ||
851 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
852 | } | ||
853 | |||
854 | static __inline unsigned int | ||
855 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
856 | { | ||
857 | const unsigned int n = (unsigned int)num; | ||
858 | return (((n >> 17)) & 0x7); | ||
859 | } | ||
860 | |||
861 | static __inline unsigned int | ||
862 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
863 | { | ||
864 | return (((unsigned int)(n >> 48)) & 0x7); | ||
865 | } | ||
866 | |||
867 | |||
868 | static __inline int | ||
869 | sign_extend(int n, int num_bits) | ||
870 | { | ||
871 | int shift = (int)(sizeof(int) * 8 - num_bits); | ||
872 | return (n << shift) >> shift; | ||
873 | } | ||
874 | |||
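Putting the extractors and sign_extend() together: a hedged sketch of turning the 17-bit X1 branch offset into a byte displacement. The scaling by 8 (offsets counted in 8-byte bundles) is an assumption of this example; the header itself only defines the raw field.

    /* Sketch: compute a conditional-branch target from an X1 bundle.
     * Assumes the encoded offset is in units of 8-byte bundles. */
    static unsigned long branch_target(tile_bundle_bits bundle, unsigned long pc)
    {
            int off = sign_extend((int)get_BrOff_X1(bundle), 17); /* 17-bit field */
            return pc + (long)off * 8;
    }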
875 | |||
876 | |||
877 | static __inline tile_bundle_bits | ||
878 | create_BrOff_SN(int num) | ||
879 | { | ||
880 | const unsigned int n = (unsigned int)num; | ||
881 | return ((n & 0x3ff) << 0); | ||
882 | } | ||
883 | |||
884 | static __inline tile_bundle_bits | ||
885 | create_BrOff_X1(int num) | ||
886 | { | ||
887 | const unsigned int n = (unsigned int)num; | ||
888 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
889 | (((tile_bundle_bits)(n & 0x00018000)) << 20); | ||
890 | } | ||
891 | |||
892 | static __inline tile_bundle_bits | ||
893 | create_BrType_X1(int num) | ||
894 | { | ||
895 | const unsigned int n = (unsigned int)num; | ||
896 | return (((tile_bundle_bits)(n & 0xf)) << 31); | ||
897 | } | ||
898 | |||
899 | static __inline tile_bundle_bits | ||
900 | create_Dest_Imm8_X1(int num) | ||
901 | { | ||
902 | const unsigned int n = (unsigned int)num; | ||
903 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
904 | (((tile_bundle_bits)(n & 0x000000c0)) << 43); | ||
905 | } | ||
906 | |||
907 | static __inline tile_bundle_bits | ||
908 | create_Dest_SN(int num) | ||
909 | { | ||
910 | const unsigned int n = (unsigned int)num; | ||
911 | return ((n & 0x3) << 2); | ||
912 | } | ||
913 | |||
914 | static __inline tile_bundle_bits | ||
915 | create_Dest_X0(int num) | ||
916 | { | ||
917 | const unsigned int n = (unsigned int)num; | ||
918 | return ((n & 0x3f) << 0); | ||
919 | } | ||
920 | |||
921 | static __inline tile_bundle_bits | ||
922 | create_Dest_X1(int num) | ||
923 | { | ||
924 | const unsigned int n = (unsigned int)num; | ||
925 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
926 | } | ||
927 | |||
928 | static __inline tile_bundle_bits | ||
929 | create_Dest_Y0(int num) | ||
930 | { | ||
931 | const unsigned int n = (unsigned int)num; | ||
932 | return ((n & 0x3f) << 0); | ||
933 | } | ||
934 | |||
935 | static __inline tile_bundle_bits | ||
936 | create_Dest_Y1(int num) | ||
937 | { | ||
938 | const unsigned int n = (unsigned int)num; | ||
939 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
940 | } | ||
941 | |||
942 | static __inline tile_bundle_bits | ||
943 | create_Imm16_X0(int num) | ||
944 | { | ||
945 | const unsigned int n = (unsigned int)num; | ||
946 | return ((n & 0xffff) << 12); | ||
947 | } | ||
948 | |||
949 | static __inline tile_bundle_bits | ||
950 | create_Imm16_X1(int num) | ||
951 | { | ||
952 | const unsigned int n = (unsigned int)num; | ||
953 | return (((tile_bundle_bits)(n & 0xffff)) << 43); | ||
954 | } | ||
955 | |||
956 | static __inline tile_bundle_bits | ||
957 | create_Imm8_SN(int num) | ||
958 | { | ||
959 | const unsigned int n = (unsigned int)num; | ||
960 | return ((n & 0xff) << 0); | ||
961 | } | ||
962 | |||
963 | static __inline tile_bundle_bits | ||
964 | create_Imm8_X0(int num) | ||
965 | { | ||
966 | const unsigned int n = (unsigned int)num; | ||
967 | return ((n & 0xff) << 12); | ||
968 | } | ||
969 | |||
970 | static __inline tile_bundle_bits | ||
971 | create_Imm8_X1(int num) | ||
972 | { | ||
973 | const unsigned int n = (unsigned int)num; | ||
974 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
975 | } | ||
976 | |||
977 | static __inline tile_bundle_bits | ||
978 | create_Imm8_Y0(int num) | ||
979 | { | ||
980 | const unsigned int n = (unsigned int)num; | ||
981 | return ((n & 0xff) << 12); | ||
982 | } | ||
983 | |||
984 | static __inline tile_bundle_bits | ||
985 | create_Imm8_Y1(int num) | ||
986 | { | ||
987 | const unsigned int n = (unsigned int)num; | ||
988 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
989 | } | ||
990 | |||
991 | static __inline tile_bundle_bits | ||
992 | create_ImmOpcodeExtension_X0(int num) | ||
993 | { | ||
994 | const unsigned int n = (unsigned int)num; | ||
995 | return ((n & 0x7f) << 20); | ||
996 | } | ||
997 | |||
998 | static __inline tile_bundle_bits | ||
999 | create_ImmOpcodeExtension_X1(int num) | ||
1000 | { | ||
1001 | const unsigned int n = (unsigned int)num; | ||
1002 | return (((tile_bundle_bits)(n & 0x7f)) << 51); | ||
1003 | } | ||
1004 | |||
1005 | static __inline tile_bundle_bits | ||
1006 | create_ImmRROpcodeExtension_SN(int num) | ||
1007 | { | ||
1008 | const unsigned int n = (unsigned int)num; | ||
1009 | return ((n & 0x3) << 8); | ||
1010 | } | ||
1011 | |||
1012 | static __inline tile_bundle_bits | ||
1013 | create_JOffLong_X1(int num) | ||
1014 | { | ||
1015 | const unsigned int n = (unsigned int)num; | ||
1016 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1017 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1018 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1019 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1020 | (((tile_bundle_bits)(n & 0x18000000)) << 31); | ||
1021 | } | ||
1022 | |||
1023 | static __inline tile_bundle_bits | ||
1024 | create_JOff_X1(int num) | ||
1025 | { | ||
1026 | const unsigned int n = (unsigned int)num; | ||
1027 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1028 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1029 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1030 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1031 | (((tile_bundle_bits)(n & 0x08000000)) << 31); | ||
1032 | } | ||
1033 | |||
1034 | static __inline tile_bundle_bits | ||
1035 | create_MF_Imm15_X1(int num) | ||
1036 | { | ||
1037 | const unsigned int n = (unsigned int)num; | ||
1038 | return (((tile_bundle_bits)(n & 0x00003fff)) << 37) | | ||
1039 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1040 | } | ||
1041 | |||
1042 | static __inline tile_bundle_bits | ||
1043 | create_MMEnd_X0(int num) | ||
1044 | { | ||
1045 | const unsigned int n = (unsigned int)num; | ||
1046 | return ((n & 0x1f) << 18); | ||
1047 | } | ||
1048 | |||
1049 | static __inline tile_bundle_bits | ||
1050 | create_MMEnd_X1(int num) | ||
1051 | { | ||
1052 | const unsigned int n = (unsigned int)num; | ||
1053 | return (((tile_bundle_bits)(n & 0x1f)) << 49); | ||
1054 | } | ||
1055 | |||
1056 | static __inline tile_bundle_bits | ||
1057 | create_MMStart_X0(int num) | ||
1058 | { | ||
1059 | const unsigned int n = (unsigned int)num; | ||
1060 | return ((n & 0x1f) << 23); | ||
1061 | } | ||
1062 | |||
1063 | static __inline tile_bundle_bits | ||
1064 | create_MMStart_X1(int num) | ||
1065 | { | ||
1066 | const unsigned int n = (unsigned int)num; | ||
1067 | return (((tile_bundle_bits)(n & 0x1f)) << 54); | ||
1068 | } | ||
1069 | |||
1070 | static __inline tile_bundle_bits | ||
1071 | create_MT_Imm15_X1(int num) | ||
1072 | { | ||
1073 | const unsigned int n = (unsigned int)num; | ||
1074 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
1075 | (((tile_bundle_bits)(n & 0x00003fc0)) << 37) | | ||
1076 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1077 | } | ||
1078 | |||
1079 | static __inline tile_bundle_bits | ||
1080 | create_Mode(int num) | ||
1081 | { | ||
1082 | const unsigned int n = (unsigned int)num; | ||
1083 | return (((tile_bundle_bits)(n & 0x1)) << 63); | ||
1084 | } | ||
1085 | |||
1086 | static __inline tile_bundle_bits | ||
1087 | create_NoRegOpcodeExtension_SN(int num) | ||
1088 | { | ||
1089 | const unsigned int n = (unsigned int)num; | ||
1090 | return ((n & 0xf) << 0); | ||
1091 | } | ||
1092 | |||
1093 | static __inline tile_bundle_bits | ||
1094 | create_Opcode_SN(int num) | ||
1095 | { | ||
1096 | const unsigned int n = (unsigned int)num; | ||
1097 | return ((n & 0x3f) << 10); | ||
1098 | } | ||
1099 | |||
1100 | static __inline tile_bundle_bits | ||
1101 | create_Opcode_X0(int num) | ||
1102 | { | ||
1103 | const unsigned int n = (unsigned int)num; | ||
1104 | return ((n & 0x7) << 28); | ||
1105 | } | ||
1106 | |||
1107 | static __inline tile_bundle_bits | ||
1108 | create_Opcode_X1(int num) | ||
1109 | { | ||
1110 | const unsigned int n = (unsigned int)num; | ||
1111 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1112 | } | ||
1113 | |||
1114 | static __inline tile_bundle_bits | ||
1115 | create_Opcode_Y0(int num) | ||
1116 | { | ||
1117 | const unsigned int n = (unsigned int)num; | ||
1118 | return ((n & 0xf) << 27); | ||
1119 | } | ||
1120 | |||
1121 | static __inline tile_bundle_bits | ||
1122 | create_Opcode_Y1(int num) | ||
1123 | { | ||
1124 | const unsigned int n = (unsigned int)num; | ||
1125 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1126 | } | ||
1127 | |||
1128 | static __inline tile_bundle_bits | ||
1129 | create_Opcode_Y2(int num) | ||
1130 | { | ||
1131 | const unsigned int n = (unsigned int)num; | ||
1132 | return (((tile_bundle_bits)(n & 0x7)) << 56); | ||
1133 | } | ||
1134 | |||
1135 | static __inline tile_bundle_bits | ||
1136 | create_RROpcodeExtension_SN(int num) | ||
1137 | { | ||
1138 | const unsigned int n = (unsigned int)num; | ||
1139 | return ((n & 0xf) << 4); | ||
1140 | } | ||
1141 | |||
1142 | static __inline tile_bundle_bits | ||
1143 | create_RRROpcodeExtension_X0(int num) | ||
1144 | { | ||
1145 | const unsigned int n = (unsigned int)num; | ||
1146 | return ((n & 0x1ff) << 18); | ||
1147 | } | ||
1148 | |||
1149 | static __inline tile_bundle_bits | ||
1150 | create_RRROpcodeExtension_X1(int num) | ||
1151 | { | ||
1152 | const unsigned int n = (unsigned int)num; | ||
1153 | return (((tile_bundle_bits)(n & 0x1ff)) << 49); | ||
1154 | } | ||
1155 | |||
1156 | static __inline tile_bundle_bits | ||
1157 | create_RRROpcodeExtension_Y0(int num) | ||
1158 | { | ||
1159 | const unsigned int n = (unsigned int)num; | ||
1160 | return ((n & 0x3) << 18); | ||
1161 | } | ||
1162 | |||
1163 | static __inline tile_bundle_bits | ||
1164 | create_RRROpcodeExtension_Y1(int num) | ||
1165 | { | ||
1166 | const unsigned int n = (unsigned int)num; | ||
1167 | return (((tile_bundle_bits)(n & 0x3)) << 49); | ||
1168 | } | ||
1169 | |||
1170 | static __inline tile_bundle_bits | ||
1171 | create_RouteOpcodeExtension_SN(int num) | ||
1172 | { | ||
1173 | const unsigned int n = (unsigned int)num; | ||
1174 | return ((n & 0x3ff) << 0); | ||
1175 | } | ||
1176 | |||
1177 | static __inline tile_bundle_bits | ||
1178 | create_S_X0(int num) | ||
1179 | { | ||
1180 | const unsigned int n = (unsigned int)num; | ||
1181 | return ((n & 0x1) << 27); | ||
1182 | } | ||
1183 | |||
1184 | static __inline tile_bundle_bits | ||
1185 | create_S_X1(int num) | ||
1186 | { | ||
1187 | const unsigned int n = (unsigned int)num; | ||
1188 | return (((tile_bundle_bits)(n & 0x1)) << 58); | ||
1189 | } | ||
1190 | |||
1191 | static __inline tile_bundle_bits | ||
1192 | create_ShAmt_X0(int num) | ||
1193 | { | ||
1194 | const unsigned int n = (unsigned int)num; | ||
1195 | return ((n & 0x1f) << 12); | ||
1196 | } | ||
1197 | |||
1198 | static __inline tile_bundle_bits | ||
1199 | create_ShAmt_X1(int num) | ||
1200 | { | ||
1201 | const unsigned int n = (unsigned int)num; | ||
1202 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1203 | } | ||
1204 | |||
1205 | static __inline tile_bundle_bits | ||
1206 | create_ShAmt_Y0(int num) | ||
1207 | { | ||
1208 | const unsigned int n = (unsigned int)num; | ||
1209 | return ((n & 0x1f) << 12); | ||
1210 | } | ||
1211 | |||
1212 | static __inline tile_bundle_bits | ||
1213 | create_ShAmt_Y1(int num) | ||
1214 | { | ||
1215 | const unsigned int n = (unsigned int)num; | ||
1216 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1217 | } | ||
1218 | |||
1219 | static __inline tile_bundle_bits | ||
1220 | create_SrcA_X0(int num) | ||
1221 | { | ||
1222 | const unsigned int n = (unsigned int)num; | ||
1223 | return ((n & 0x3f) << 6); | ||
1224 | } | ||
1225 | |||
1226 | static __inline tile_bundle_bits | ||
1227 | create_SrcA_X1(int num) | ||
1228 | { | ||
1229 | const unsigned int n = (unsigned int)num; | ||
1230 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1231 | } | ||
1232 | |||
1233 | static __inline tile_bundle_bits | ||
1234 | create_SrcA_Y0(int num) | ||
1235 | { | ||
1236 | const unsigned int n = (unsigned int)num; | ||
1237 | return ((n & 0x3f) << 6); | ||
1238 | } | ||
1239 | |||
1240 | static __inline tile_bundle_bits | ||
1241 | create_SrcA_Y1(int num) | ||
1242 | { | ||
1243 | const unsigned int n = (unsigned int)num; | ||
1244 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1245 | } | ||
1246 | |||
1247 | static __inline tile_bundle_bits | ||
1248 | create_SrcA_Y2(int num) | ||
1249 | { | ||
1250 | const unsigned int n = (unsigned int)num; | ||
1251 | return ((n & 0x00000001) << 26) | | ||
1252 | (((tile_bundle_bits)(n & 0x0000003e)) << 50); | ||
1253 | } | ||
1254 | |||
1255 | static __inline tile_bundle_bits | ||
1256 | create_SrcBDest_Y2(int num) | ||
1257 | { | ||
1258 | const unsigned int n = (unsigned int)num; | ||
1259 | return ((n & 0x3f) << 20); | ||
1260 | } | ||
1261 | |||
1262 | static __inline tile_bundle_bits | ||
1263 | create_SrcB_X0(int num) | ||
1264 | { | ||
1265 | const unsigned int n = (unsigned int)num; | ||
1266 | return ((n & 0x3f) << 12); | ||
1267 | } | ||
1268 | |||
1269 | static __inline tile_bundle_bits | ||
1270 | create_SrcB_X1(int num) | ||
1271 | { | ||
1272 | const unsigned int n = (unsigned int)num; | ||
1273 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1274 | } | ||
1275 | |||
1276 | static __inline tile_bundle_bits | ||
1277 | create_SrcB_Y0(int num) | ||
1278 | { | ||
1279 | const unsigned int n = (unsigned int)num; | ||
1280 | return ((n & 0x3f) << 12); | ||
1281 | } | ||
1282 | |||
1283 | static __inline tile_bundle_bits | ||
1284 | create_SrcB_Y1(int num) | ||
1285 | { | ||
1286 | const unsigned int n = (unsigned int)num; | ||
1287 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1288 | } | ||
1289 | |||
1290 | static __inline tile_bundle_bits | ||
1291 | create_Src_SN(int num) | ||
1292 | { | ||
1293 | const unsigned int n = (unsigned int)num; | ||
1294 | return ((n & 0x3) << 0); | ||
1295 | } | ||
1296 | |||
1297 | static __inline tile_bundle_bits | ||
1298 | create_UnOpcodeExtension_X0(int num) | ||
1299 | { | ||
1300 | const unsigned int n = (unsigned int)num; | ||
1301 | return ((n & 0x1f) << 12); | ||
1302 | } | ||
1303 | |||
1304 | static __inline tile_bundle_bits | ||
1305 | create_UnOpcodeExtension_X1(int num) | ||
1306 | { | ||
1307 | const unsigned int n = (unsigned int)num; | ||
1308 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1309 | } | ||
1310 | |||
1311 | static __inline tile_bundle_bits | ||
1312 | create_UnOpcodeExtension_Y0(int num) | ||
1313 | { | ||
1314 | const unsigned int n = (unsigned int)num; | ||
1315 | return ((n & 0x1f) << 12); | ||
1316 | } | ||
1317 | |||
1318 | static __inline tile_bundle_bits | ||
1319 | create_UnOpcodeExtension_Y1(int num) | ||
1320 | { | ||
1321 | const unsigned int n = (unsigned int)num; | ||
1322 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1323 | } | ||
1324 | |||
1325 | static __inline tile_bundle_bits | ||
1326 | create_UnShOpcodeExtension_X0(int num) | ||
1327 | { | ||
1328 | const unsigned int n = (unsigned int)num; | ||
1329 | return ((n & 0x3ff) << 17); | ||
1330 | } | ||
1331 | |||
1332 | static __inline tile_bundle_bits | ||
1333 | create_UnShOpcodeExtension_X1(int num) | ||
1334 | { | ||
1335 | const unsigned int n = (unsigned int)num; | ||
1336 | return (((tile_bundle_bits)(n & 0x3ff)) << 48); | ||
1337 | } | ||
1338 | |||
1339 | static __inline tile_bundle_bits | ||
1340 | create_UnShOpcodeExtension_Y0(int num) | ||
1341 | { | ||
1342 | const unsigned int n = (unsigned int)num; | ||
1343 | return ((n & 0x7) << 17); | ||
1344 | } | ||
1345 | |||
1346 | static __inline tile_bundle_bits | ||
1347 | create_UnShOpcodeExtension_Y1(int num) | ||
1348 | { | ||
1349 | const unsigned int n = (unsigned int)num; | ||
1350 | return (((tile_bundle_bits)(n & 0x7)) << 48); | ||
1351 | } | ||
1352 | |||
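The create_* helpers are the bit-for-bit inverse of the get_* extractors above. As a minimal sketch (the rewrite itself is purely illustrative), an existing X1 destination register can be swapped without disturbing the rest of the bundle:

    /* Sketch: replace the 6-bit Dest field of the X1 slot in a bundle. */
    static tile_bundle_bits set_x1_dest(tile_bundle_bits bundle, int new_reg)
    {
            bundle &= ~create_Dest_X1(0x3f);        /* clear the field */
            bundle |= create_Dest_X1(new_reg & 0x3f);
            return bundle;
    }

Passing the all-ones field value to create_Dest_X1() and complementing it is a convenient way to build the clearing mask from the same helper that does the insertion.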
1353 | |||
1354 | |||
1355 | typedef enum | ||
1356 | { | ||
1357 | TILE_PIPELINE_X0, | ||
1358 | TILE_PIPELINE_X1, | ||
1359 | TILE_PIPELINE_Y0, | ||
1360 | TILE_PIPELINE_Y1, | ||
1361 | TILE_PIPELINE_Y2, | ||
1362 | } tile_pipeline; | ||
1363 | |||
1364 | #define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) | ||
1365 | |||
1366 | typedef enum | ||
1367 | { | ||
1368 | TILE_OP_TYPE_REGISTER, | ||
1369 | TILE_OP_TYPE_IMMEDIATE, | ||
1370 | TILE_OP_TYPE_ADDRESS, | ||
1371 | TILE_OP_TYPE_SPR | ||
1372 | } tile_operand_type; | ||
1373 | |||
1374 | /* This is the bit that determines if a bundle is in the Y encoding. */ | ||
1375 | #define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) | ||
1376 | |||
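A brief usage sketch: testing this mask is equivalent to reading the Mode bit with get_Mode() defined earlier in this header.

    /* Sketch: two equivalent ways to ask "is this bundle Y-encoded?". */
    static int bundle_is_y_encoded(tile_bundle_bits bundle)
    {
            return (bundle & TILE_BUNDLE_Y_ENCODING_MASK) != 0;
            /* identical to: return get_Mode(bundle) == 1; */
    }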
1377 | enum | ||
1378 | { | ||
1379 | /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ | ||
1380 | TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, | ||
1381 | |||
1382 | /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ | ||
1383 | TILE_NUM_PIPELINE_ENCODINGS = 5, | ||
1384 | |||
1385 | /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ | ||
1386 | TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, | ||
1387 | |||
1388 | /* Instructions take this many bytes. */ | ||
1389 | TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, | ||
1390 | |||
1391 | /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ | ||
1392 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, | ||
1393 | |||
1394 | /* Bundles should be aligned modulo this number of bytes. */ | ||
1395 | TILE_BUNDLE_ALIGNMENT_IN_BYTES = | ||
1396 | (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), | ||
1397 | |||
1398 | /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */ | ||
1399 | TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1, | ||
1400 | |||
1401 | /* Static network instructions take this many bytes. */ | ||
1402 | TILE_SN_INSTRUCTION_SIZE_IN_BYTES = | ||
1403 | (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES), | ||
1404 | |||
1405 | /* Number of registers (some are magic, such as network I/O). */ | ||
1406 | TILE_NUM_REGISTERS = 64, | ||
1407 | |||
1408 | /* Number of static network registers. */ | ||
1409 | TILE_NUM_SN_REGISTERS = 4 | ||
1410 | }; | ||
1411 | |||
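As a hedged sketch of how the sizing constants might be consumed, the loop below walks a code region one 8-byte bundle at a time; the region bounds and the callback are hypothetical parameters of this example.

    #include <string.h>

    /* Sketch: visit each bundle in [start, end); bundles are
     * TILE_BUNDLE_SIZE_IN_BYTES long and expected to be aligned to
     * TILE_BUNDLE_ALIGNMENT_IN_BYTES. */
    static void for_each_bundle(const unsigned char *start,
                                const unsigned char *end,
                                void (*fn)(tile_bundle_bits))
    {
            const unsigned char *p = start;
            while (p + TILE_BUNDLE_SIZE_IN_BYTES <= end) {
                    tile_bundle_bits b;
                    memcpy(&b, p, sizeof(b));   /* copy avoids alignment traps */
                    fn(b);
                    p += TILE_BUNDLE_SIZE_IN_BYTES;
            }
    }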
1412 | |||
1413 | struct tile_operand | ||
1414 | { | ||
1415 | /* Is this operand a register, immediate or address? */ | ||
1416 | tile_operand_type type; | ||
1417 | |||
1418 | /* The default relocation type for this operand. */ | ||
1419 | signed int default_reloc : 16; | ||
1420 | |||
1421 | /* How many bits is this value? (used for range checking) */ | ||
1422 | unsigned int num_bits : 5; | ||
1423 | |||
1424 | /* Is the value signed? (used for range checking) */ | ||
1425 | unsigned int is_signed : 1; | ||
1426 | |||
1427 | /* Is this operand a source register? */ | ||
1428 | unsigned int is_src_reg : 1; | ||
1429 | |||
1430 | /* Is this operand written? (i.e. is it a destination register) */ | ||
1431 | unsigned int is_dest_reg : 1; | ||
1432 | |||
1433 | /* Is this operand PC-relative? */ | ||
1434 | unsigned int is_pc_relative : 1; | ||
1435 | |||
1436 | /* By how many bits do we right shift the value before inserting? */ | ||
1437 | unsigned int rightshift : 2; | ||
1438 | |||
1439 | /* Return the bits for this operand to be ORed into an existing bundle. */ | ||
1440 | tile_bundle_bits (*insert) (int op); | ||
1441 | |||
1442 | /* Extract this operand and return it. */ | ||
1443 | unsigned int (*extract) (tile_bundle_bits bundle); | ||
1444 | }; | ||
1445 | |||
1446 | |||
1447 | extern const struct tile_operand tile_operands[]; | ||
1448 | |||
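A hedged sketch of driving one table entry: the insert callback produces the bits to OR into a bundle, and num_bits/is_signed/rightshift supply the range-check and pre-shift information exactly as the struct comments describe. The range-check policy below is an assumption of this example, not a rule stated by this header.

    /* Sketch: encode one operand value into a bundle, with a range check. */
    static int encode_operand(const struct tile_operand *op, int value,
                              tile_bundle_bits *bundle)
    {
            int bits = op->num_bits;
            long long min = op->is_signed ? -(1LL << (bits - 1)) : 0;
            long long max = op->is_signed ? (1LL << (bits - 1)) - 1
                                          : (1LL << bits) - 1;

            if (value < min || value > max)
                    return -1;              /* value does not fit this field */
            *bundle |= op->insert(value >> op->rightshift);
            return 0;
    }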
1449 | /* One finite-state machine per pipe for rapid instruction decoding. */ | ||
1450 | extern const unsigned short * const | ||
1451 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1452 | |||
1453 | |||
1454 | struct tile_opcode | ||
1455 | { | ||
1456 | /* The opcode mnemonic, e.g. "add" */ | ||
1457 | const char *name; | ||
1458 | |||
1459 | /* The enum value for this mnemonic. */ | ||
1460 | tile_mnemonic mnemonic; | ||
1461 | |||
1462 | /* A bit mask of which of the five pipes this instruction | ||
1463 | is compatible with: | ||
1464 | X0 0x01 | ||
1465 | X1 0x02 | ||
1466 | Y0 0x04 | ||
1467 | Y1 0x08 | ||
1468 | Y2 0x10 */ | ||
1469 | unsigned char pipes; | ||
1470 | |||
1471 | /* How many operands are there? */ | ||
1472 | unsigned char num_operands; | ||
1473 | |||
1474 | /* Which register does this write implicitly, or TREG_ZERO if none? */ | ||
1475 | unsigned char implicitly_written_register; | ||
1476 | |||
1477 | /* Can this be bundled with other instructions (almost always true). */ | ||
1478 | unsigned char can_bundle; | ||
1479 | |||
1480 | /* The description of the operands. Each of these is an | ||
1481 | * index into the tile_operands[] table. */ | ||
1482 | unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; | ||
1483 | |||
1484 | }; | ||
1485 | |||
1486 | extern const struct tile_opcode tile_opcodes[]; | ||
1487 | |||
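A hedged sketch of scanning this table: it assumes (per the enum above) that the array is terminated by an entry whose mnemonic is TILE_OPC_NONE, and it uses the pipes bitmask encoding documented in the struct comment.

    #include <string.h>

    /* Sketch: find an opcode by mnemonic name. */
    static const struct tile_opcode *find_opcode(const char *name)
    {
            const struct tile_opcode *op;
            for (op = tile_opcodes; op->mnemonic != TILE_OPC_NONE; op++)
                    if (strcmp(op->name, name) == 0)
                            return op;
            return 0;
    }

    /* usage: const struct tile_opcode *add = find_opcode("add");
     *        int ok_in_x1 = add && (add->pipes & 0x02);   -- X1 bit */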
1488 | |||
1489 | /* Used for non-textual disassembly into structs. */ | ||
1490 | struct tile_decoded_instruction | ||
1491 | { | ||
1492 | const struct tile_opcode *opcode; | ||
1493 | const struct tile_operand *operands[TILE_MAX_OPERANDS]; | ||
1494 | int operand_values[TILE_MAX_OPERANDS]; | ||
1495 | }; | ||
1496 | |||
1497 | |||
1498 | /* Disassemble a bundle into a struct for machine processing. */ | ||
1499 | extern int parse_insn_tile(tile_bundle_bits bits, | ||
1500 | unsigned int pc, | ||
1501 | struct tile_decoded_instruction | ||
1502 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); | ||
1503 | |||
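As a hedged usage sketch of the declaration above (the return value is assumed here to be the number of instructions decoded, which this header does not state explicitly):

    #include <stdio.h>

    /* Sketch: decode one bundle and print the mnemonics it contains. */
    static void dump_bundle(tile_bundle_bits bits, unsigned int pc)
    {
            struct tile_decoded_instruction
                    decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
            int i, n = parse_insn_tile(bits, pc, decoded); /* assumed: count */

            for (i = 0; i < n; i++)
                    printf("  %s (%d operands)\n",
                           decoded[i].opcode->name,
                           decoded[i].opcode->num_operands);
    }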
1504 | |||
1505 | |||
1506 | #endif /* opcode_tile_h */ | ||
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h new file mode 100644 index 00000000000..eda60ecbae3 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_64.h | |||
@@ -0,0 +1,1506 @@ | |||
1 | /* tile.h -- Header file for TILE opcode table | ||
2 | Copyright (C) 2005 Free Software Foundation, Inc. | ||
3 | Contributed by Tilera Corp. */ | ||
4 | |||
5 | #ifndef opcode_tile_h | ||
6 | #define opcode_tile_h | ||
7 | |||
8 | typedef unsigned long long tile_bundle_bits; | ||
9 | |||
10 | |||
11 | enum | ||
12 | { | ||
13 | TILE_MAX_OPERANDS = 5 /* mm */ | ||
14 | }; | ||
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_RAISE, | ||
32 | TILE_OPC_ADD, | ||
33 | TILE_OPC_ADD_SN, | ||
34 | TILE_OPC_ADDB, | ||
35 | TILE_OPC_ADDB_SN, | ||
36 | TILE_OPC_ADDBS_U, | ||
37 | TILE_OPC_ADDBS_U_SN, | ||
38 | TILE_OPC_ADDH, | ||
39 | TILE_OPC_ADDH_SN, | ||
40 | TILE_OPC_ADDHS, | ||
41 | TILE_OPC_ADDHS_SN, | ||
42 | TILE_OPC_ADDI, | ||
43 | TILE_OPC_ADDI_SN, | ||
44 | TILE_OPC_ADDIB, | ||
45 | TILE_OPC_ADDIB_SN, | ||
46 | TILE_OPC_ADDIH, | ||
47 | TILE_OPC_ADDIH_SN, | ||
48 | TILE_OPC_ADDLI, | ||
49 | TILE_OPC_ADDLI_SN, | ||
50 | TILE_OPC_ADDLIS, | ||
51 | TILE_OPC_ADDS, | ||
52 | TILE_OPC_ADDS_SN, | ||
53 | TILE_OPC_ADIFFB_U, | ||
54 | TILE_OPC_ADIFFB_U_SN, | ||
55 | TILE_OPC_ADIFFH, | ||
56 | TILE_OPC_ADIFFH_SN, | ||
57 | TILE_OPC_AND, | ||
58 | TILE_OPC_AND_SN, | ||
59 | TILE_OPC_ANDI, | ||
60 | TILE_OPC_ANDI_SN, | ||
61 | TILE_OPC_AULI, | ||
62 | TILE_OPC_AVGB_U, | ||
63 | TILE_OPC_AVGB_U_SN, | ||
64 | TILE_OPC_AVGH, | ||
65 | TILE_OPC_AVGH_SN, | ||
66 | TILE_OPC_BBNS, | ||
67 | TILE_OPC_BBNS_SN, | ||
68 | TILE_OPC_BBNST, | ||
69 | TILE_OPC_BBNST_SN, | ||
70 | TILE_OPC_BBS, | ||
71 | TILE_OPC_BBS_SN, | ||
72 | TILE_OPC_BBST, | ||
73 | TILE_OPC_BBST_SN, | ||
74 | TILE_OPC_BGEZ, | ||
75 | TILE_OPC_BGEZ_SN, | ||
76 | TILE_OPC_BGEZT, | ||
77 | TILE_OPC_BGEZT_SN, | ||
78 | TILE_OPC_BGZ, | ||
79 | TILE_OPC_BGZ_SN, | ||
80 | TILE_OPC_BGZT, | ||
81 | TILE_OPC_BGZT_SN, | ||
82 | TILE_OPC_BITX, | ||
83 | TILE_OPC_BITX_SN, | ||
84 | TILE_OPC_BLEZ, | ||
85 | TILE_OPC_BLEZ_SN, | ||
86 | TILE_OPC_BLEZT, | ||
87 | TILE_OPC_BLEZT_SN, | ||
88 | TILE_OPC_BLZ, | ||
89 | TILE_OPC_BLZ_SN, | ||
90 | TILE_OPC_BLZT, | ||
91 | TILE_OPC_BLZT_SN, | ||
92 | TILE_OPC_BNZ, | ||
93 | TILE_OPC_BNZ_SN, | ||
94 | TILE_OPC_BNZT, | ||
95 | TILE_OPC_BNZT_SN, | ||
96 | TILE_OPC_BYTEX, | ||
97 | TILE_OPC_BYTEX_SN, | ||
98 | TILE_OPC_BZ, | ||
99 | TILE_OPC_BZ_SN, | ||
100 | TILE_OPC_BZT, | ||
101 | TILE_OPC_BZT_SN, | ||
102 | TILE_OPC_CLZ, | ||
103 | TILE_OPC_CLZ_SN, | ||
104 | TILE_OPC_CRC32_32, | ||
105 | TILE_OPC_CRC32_32_SN, | ||
106 | TILE_OPC_CRC32_8, | ||
107 | TILE_OPC_CRC32_8_SN, | ||
108 | TILE_OPC_CTZ, | ||
109 | TILE_OPC_CTZ_SN, | ||
110 | TILE_OPC_DRAIN, | ||
111 | TILE_OPC_DTLBPR, | ||
112 | TILE_OPC_DWORD_ALIGN, | ||
113 | TILE_OPC_DWORD_ALIGN_SN, | ||
114 | TILE_OPC_FINV, | ||
115 | TILE_OPC_FLUSH, | ||
116 | TILE_OPC_FNOP, | ||
117 | TILE_OPC_ICOH, | ||
118 | TILE_OPC_ILL, | ||
119 | TILE_OPC_INTHB, | ||
120 | TILE_OPC_INTHB_SN, | ||
121 | TILE_OPC_INTHH, | ||
122 | TILE_OPC_INTHH_SN, | ||
123 | TILE_OPC_INTLB, | ||
124 | TILE_OPC_INTLB_SN, | ||
125 | TILE_OPC_INTLH, | ||
126 | TILE_OPC_INTLH_SN, | ||
127 | TILE_OPC_INV, | ||
128 | TILE_OPC_IRET, | ||
129 | TILE_OPC_JALB, | ||
130 | TILE_OPC_JALF, | ||
131 | TILE_OPC_JALR, | ||
132 | TILE_OPC_JALRP, | ||
133 | TILE_OPC_JB, | ||
134 | TILE_OPC_JF, | ||
135 | TILE_OPC_JR, | ||
136 | TILE_OPC_JRP, | ||
137 | TILE_OPC_LB, | ||
138 | TILE_OPC_LB_SN, | ||
139 | TILE_OPC_LB_U, | ||
140 | TILE_OPC_LB_U_SN, | ||
141 | TILE_OPC_LBADD, | ||
142 | TILE_OPC_LBADD_SN, | ||
143 | TILE_OPC_LBADD_U, | ||
144 | TILE_OPC_LBADD_U_SN, | ||
145 | TILE_OPC_LH, | ||
146 | TILE_OPC_LH_SN, | ||
147 | TILE_OPC_LH_U, | ||
148 | TILE_OPC_LH_U_SN, | ||
149 | TILE_OPC_LHADD, | ||
150 | TILE_OPC_LHADD_SN, | ||
151 | TILE_OPC_LHADD_U, | ||
152 | TILE_OPC_LHADD_U_SN, | ||
153 | TILE_OPC_LNK, | ||
154 | TILE_OPC_LNK_SN, | ||
155 | TILE_OPC_LW, | ||
156 | TILE_OPC_LW_SN, | ||
157 | TILE_OPC_LW_NA, | ||
158 | TILE_OPC_LW_NA_SN, | ||
159 | TILE_OPC_LWADD, | ||
160 | TILE_OPC_LWADD_SN, | ||
161 | TILE_OPC_LWADD_NA, | ||
162 | TILE_OPC_LWADD_NA_SN, | ||
163 | TILE_OPC_MAXB_U, | ||
164 | TILE_OPC_MAXB_U_SN, | ||
165 | TILE_OPC_MAXH, | ||
166 | TILE_OPC_MAXH_SN, | ||
167 | TILE_OPC_MAXIB_U, | ||
168 | TILE_OPC_MAXIB_U_SN, | ||
169 | TILE_OPC_MAXIH, | ||
170 | TILE_OPC_MAXIH_SN, | ||
171 | TILE_OPC_MF, | ||
172 | TILE_OPC_MFSPR, | ||
173 | TILE_OPC_MINB_U, | ||
174 | TILE_OPC_MINB_U_SN, | ||
175 | TILE_OPC_MINH, | ||
176 | TILE_OPC_MINH_SN, | ||
177 | TILE_OPC_MINIB_U, | ||
178 | TILE_OPC_MINIB_U_SN, | ||
179 | TILE_OPC_MINIH, | ||
180 | TILE_OPC_MINIH_SN, | ||
181 | TILE_OPC_MM, | ||
182 | TILE_OPC_MNZ, | ||
183 | TILE_OPC_MNZ_SN, | ||
184 | TILE_OPC_MNZB, | ||
185 | TILE_OPC_MNZB_SN, | ||
186 | TILE_OPC_MNZH, | ||
187 | TILE_OPC_MNZH_SN, | ||
188 | TILE_OPC_MTSPR, | ||
189 | TILE_OPC_MULHH_SS, | ||
190 | TILE_OPC_MULHH_SS_SN, | ||
191 | TILE_OPC_MULHH_SU, | ||
192 | TILE_OPC_MULHH_SU_SN, | ||
193 | TILE_OPC_MULHH_UU, | ||
194 | TILE_OPC_MULHH_UU_SN, | ||
195 | TILE_OPC_MULHHA_SS, | ||
196 | TILE_OPC_MULHHA_SS_SN, | ||
197 | TILE_OPC_MULHHA_SU, | ||
198 | TILE_OPC_MULHHA_SU_SN, | ||
199 | TILE_OPC_MULHHA_UU, | ||
200 | TILE_OPC_MULHHA_UU_SN, | ||
201 | TILE_OPC_MULHHSA_UU, | ||
202 | TILE_OPC_MULHHSA_UU_SN, | ||
203 | TILE_OPC_MULHL_SS, | ||
204 | TILE_OPC_MULHL_SS_SN, | ||
205 | TILE_OPC_MULHL_SU, | ||
206 | TILE_OPC_MULHL_SU_SN, | ||
207 | TILE_OPC_MULHL_US, | ||
208 | TILE_OPC_MULHL_US_SN, | ||
209 | TILE_OPC_MULHL_UU, | ||
210 | TILE_OPC_MULHL_UU_SN, | ||
211 | TILE_OPC_MULHLA_SS, | ||
212 | TILE_OPC_MULHLA_SS_SN, | ||
213 | TILE_OPC_MULHLA_SU, | ||
214 | TILE_OPC_MULHLA_SU_SN, | ||
215 | TILE_OPC_MULHLA_US, | ||
216 | TILE_OPC_MULHLA_US_SN, | ||
217 | TILE_OPC_MULHLA_UU, | ||
218 | TILE_OPC_MULHLA_UU_SN, | ||
219 | TILE_OPC_MULHLSA_UU, | ||
220 | TILE_OPC_MULHLSA_UU_SN, | ||
221 | TILE_OPC_MULLL_SS, | ||
222 | TILE_OPC_MULLL_SS_SN, | ||
223 | TILE_OPC_MULLL_SU, | ||
224 | TILE_OPC_MULLL_SU_SN, | ||
225 | TILE_OPC_MULLL_UU, | ||
226 | TILE_OPC_MULLL_UU_SN, | ||
227 | TILE_OPC_MULLLA_SS, | ||
228 | TILE_OPC_MULLLA_SS_SN, | ||
229 | TILE_OPC_MULLLA_SU, | ||
230 | TILE_OPC_MULLLA_SU_SN, | ||
231 | TILE_OPC_MULLLA_UU, | ||
232 | TILE_OPC_MULLLA_UU_SN, | ||
233 | TILE_OPC_MULLLSA_UU, | ||
234 | TILE_OPC_MULLLSA_UU_SN, | ||
235 | TILE_OPC_MVNZ, | ||
236 | TILE_OPC_MVNZ_SN, | ||
237 | TILE_OPC_MVZ, | ||
238 | TILE_OPC_MVZ_SN, | ||
239 | TILE_OPC_MZ, | ||
240 | TILE_OPC_MZ_SN, | ||
241 | TILE_OPC_MZB, | ||
242 | TILE_OPC_MZB_SN, | ||
243 | TILE_OPC_MZH, | ||
244 | TILE_OPC_MZH_SN, | ||
245 | TILE_OPC_NAP, | ||
246 | TILE_OPC_NOP, | ||
247 | TILE_OPC_NOR, | ||
248 | TILE_OPC_NOR_SN, | ||
249 | TILE_OPC_OR, | ||
250 | TILE_OPC_OR_SN, | ||
251 | TILE_OPC_ORI, | ||
252 | TILE_OPC_ORI_SN, | ||
253 | TILE_OPC_PACKBS_U, | ||
254 | TILE_OPC_PACKBS_U_SN, | ||
255 | TILE_OPC_PACKHB, | ||
256 | TILE_OPC_PACKHB_SN, | ||
257 | TILE_OPC_PACKHS, | ||
258 | TILE_OPC_PACKHS_SN, | ||
259 | TILE_OPC_PACKLB, | ||
260 | TILE_OPC_PACKLB_SN, | ||
261 | TILE_OPC_PCNT, | ||
262 | TILE_OPC_PCNT_SN, | ||
263 | TILE_OPC_RL, | ||
264 | TILE_OPC_RL_SN, | ||
265 | TILE_OPC_RLI, | ||
266 | TILE_OPC_RLI_SN, | ||
267 | TILE_OPC_S1A, | ||
268 | TILE_OPC_S1A_SN, | ||
269 | TILE_OPC_S2A, | ||
270 | TILE_OPC_S2A_SN, | ||
271 | TILE_OPC_S3A, | ||
272 | TILE_OPC_S3A_SN, | ||
273 | TILE_OPC_SADAB_U, | ||
274 | TILE_OPC_SADAB_U_SN, | ||
275 | TILE_OPC_SADAH, | ||
276 | TILE_OPC_SADAH_SN, | ||
277 | TILE_OPC_SADAH_U, | ||
278 | TILE_OPC_SADAH_U_SN, | ||
279 | TILE_OPC_SADB_U, | ||
280 | TILE_OPC_SADB_U_SN, | ||
281 | TILE_OPC_SADH, | ||
282 | TILE_OPC_SADH_SN, | ||
283 | TILE_OPC_SADH_U, | ||
284 | TILE_OPC_SADH_U_SN, | ||
285 | TILE_OPC_SB, | ||
286 | TILE_OPC_SBADD, | ||
287 | TILE_OPC_SEQ, | ||
288 | TILE_OPC_SEQ_SN, | ||
289 | TILE_OPC_SEQB, | ||
290 | TILE_OPC_SEQB_SN, | ||
291 | TILE_OPC_SEQH, | ||
292 | TILE_OPC_SEQH_SN, | ||
293 | TILE_OPC_SEQI, | ||
294 | TILE_OPC_SEQI_SN, | ||
295 | TILE_OPC_SEQIB, | ||
296 | TILE_OPC_SEQIB_SN, | ||
297 | TILE_OPC_SEQIH, | ||
298 | TILE_OPC_SEQIH_SN, | ||
299 | TILE_OPC_SH, | ||
300 | TILE_OPC_SHADD, | ||
301 | TILE_OPC_SHL, | ||
302 | TILE_OPC_SHL_SN, | ||
303 | TILE_OPC_SHLB, | ||
304 | TILE_OPC_SHLB_SN, | ||
305 | TILE_OPC_SHLH, | ||
306 | TILE_OPC_SHLH_SN, | ||
307 | TILE_OPC_SHLI, | ||
308 | TILE_OPC_SHLI_SN, | ||
309 | TILE_OPC_SHLIB, | ||
310 | TILE_OPC_SHLIB_SN, | ||
311 | TILE_OPC_SHLIH, | ||
312 | TILE_OPC_SHLIH_SN, | ||
313 | TILE_OPC_SHR, | ||
314 | TILE_OPC_SHR_SN, | ||
315 | TILE_OPC_SHRB, | ||
316 | TILE_OPC_SHRB_SN, | ||
317 | TILE_OPC_SHRH, | ||
318 | TILE_OPC_SHRH_SN, | ||
319 | TILE_OPC_SHRI, | ||
320 | TILE_OPC_SHRI_SN, | ||
321 | TILE_OPC_SHRIB, | ||
322 | TILE_OPC_SHRIB_SN, | ||
323 | TILE_OPC_SHRIH, | ||
324 | TILE_OPC_SHRIH_SN, | ||
325 | TILE_OPC_SLT, | ||
326 | TILE_OPC_SLT_SN, | ||
327 | TILE_OPC_SLT_U, | ||
328 | TILE_OPC_SLT_U_SN, | ||
329 | TILE_OPC_SLTB, | ||
330 | TILE_OPC_SLTB_SN, | ||
331 | TILE_OPC_SLTB_U, | ||
332 | TILE_OPC_SLTB_U_SN, | ||
333 | TILE_OPC_SLTE, | ||
334 | TILE_OPC_SLTE_SN, | ||
335 | TILE_OPC_SLTE_U, | ||
336 | TILE_OPC_SLTE_U_SN, | ||
337 | TILE_OPC_SLTEB, | ||
338 | TILE_OPC_SLTEB_SN, | ||
339 | TILE_OPC_SLTEB_U, | ||
340 | TILE_OPC_SLTEB_U_SN, | ||
341 | TILE_OPC_SLTEH, | ||
342 | TILE_OPC_SLTEH_SN, | ||
343 | TILE_OPC_SLTEH_U, | ||
344 | TILE_OPC_SLTEH_U_SN, | ||
345 | TILE_OPC_SLTH, | ||
346 | TILE_OPC_SLTH_SN, | ||
347 | TILE_OPC_SLTH_U, | ||
348 | TILE_OPC_SLTH_U_SN, | ||
349 | TILE_OPC_SLTI, | ||
350 | TILE_OPC_SLTI_SN, | ||
351 | TILE_OPC_SLTI_U, | ||
352 | TILE_OPC_SLTI_U_SN, | ||
353 | TILE_OPC_SLTIB, | ||
354 | TILE_OPC_SLTIB_SN, | ||
355 | TILE_OPC_SLTIB_U, | ||
356 | TILE_OPC_SLTIB_U_SN, | ||
357 | TILE_OPC_SLTIH, | ||
358 | TILE_OPC_SLTIH_SN, | ||
359 | TILE_OPC_SLTIH_U, | ||
360 | TILE_OPC_SLTIH_U_SN, | ||
361 | TILE_OPC_SNE, | ||
362 | TILE_OPC_SNE_SN, | ||
363 | TILE_OPC_SNEB, | ||
364 | TILE_OPC_SNEB_SN, | ||
365 | TILE_OPC_SNEH, | ||
366 | TILE_OPC_SNEH_SN, | ||
367 | TILE_OPC_SRA, | ||
368 | TILE_OPC_SRA_SN, | ||
369 | TILE_OPC_SRAB, | ||
370 | TILE_OPC_SRAB_SN, | ||
371 | TILE_OPC_SRAH, | ||
372 | TILE_OPC_SRAH_SN, | ||
373 | TILE_OPC_SRAI, | ||
374 | TILE_OPC_SRAI_SN, | ||
375 | TILE_OPC_SRAIB, | ||
376 | TILE_OPC_SRAIB_SN, | ||
377 | TILE_OPC_SRAIH, | ||
378 | TILE_OPC_SRAIH_SN, | ||
379 | TILE_OPC_SUB, | ||
380 | TILE_OPC_SUB_SN, | ||
381 | TILE_OPC_SUBB, | ||
382 | TILE_OPC_SUBB_SN, | ||
383 | TILE_OPC_SUBBS_U, | ||
384 | TILE_OPC_SUBBS_U_SN, | ||
385 | TILE_OPC_SUBH, | ||
386 | TILE_OPC_SUBH_SN, | ||
387 | TILE_OPC_SUBHS, | ||
388 | TILE_OPC_SUBHS_SN, | ||
389 | TILE_OPC_SUBS, | ||
390 | TILE_OPC_SUBS_SN, | ||
391 | TILE_OPC_SW, | ||
392 | TILE_OPC_SWADD, | ||
393 | TILE_OPC_SWINT0, | ||
394 | TILE_OPC_SWINT1, | ||
395 | TILE_OPC_SWINT2, | ||
396 | TILE_OPC_SWINT3, | ||
397 | TILE_OPC_TBLIDXB0, | ||
398 | TILE_OPC_TBLIDXB0_SN, | ||
399 | TILE_OPC_TBLIDXB1, | ||
400 | TILE_OPC_TBLIDXB1_SN, | ||
401 | TILE_OPC_TBLIDXB2, | ||
402 | TILE_OPC_TBLIDXB2_SN, | ||
403 | TILE_OPC_TBLIDXB3, | ||
404 | TILE_OPC_TBLIDXB3_SN, | ||
405 | TILE_OPC_TNS, | ||
406 | TILE_OPC_TNS_SN, | ||
407 | TILE_OPC_WH64, | ||
408 | TILE_OPC_XOR, | ||
409 | TILE_OPC_XOR_SN, | ||
410 | TILE_OPC_XORI, | ||
411 | TILE_OPC_XORI_SN, | ||
412 | TILE_OPC_NONE | ||
413 | } tile_mnemonic; | ||
414 | |||
415 | /* 64-bit pattern for a { bpt ; nop } bundle. */ | ||
416 | #define TILE_BPT_BUNDLE 0x400b3cae70166000ULL | ||
417 | |||
418 | |||
419 | #define TILE_ELF_MACHINE_CODE EM_TILEPRO | ||
420 | |||
421 | #define TILE_ELF_NAME "elf32-tilepro" | ||
422 | |||
423 | |||
424 | static __inline unsigned int | ||
425 | get_BrOff_SN(tile_bundle_bits num) | ||
426 | { | ||
427 | const unsigned int n = (unsigned int)num; | ||
428 | return (((n >> 0)) & 0x3ff); | ||
429 | } | ||
430 | |||
431 | static __inline unsigned int | ||
432 | get_BrOff_X1(tile_bundle_bits n) | ||
433 | { | ||
434 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
435 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
436 | } | ||
437 | |||
438 | static __inline unsigned int | ||
439 | get_BrType_X1(tile_bundle_bits n) | ||
440 | { | ||
441 | return (((unsigned int)(n >> 31)) & 0xf); | ||
442 | } | ||
443 | |||
444 | static __inline unsigned int | ||
445 | get_Dest_Imm8_X1(tile_bundle_bits n) | ||
446 | { | ||
447 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
448 | (((unsigned int)(n >> 43)) & 0x000000c0); | ||
449 | } | ||
450 | |||
451 | static __inline unsigned int | ||
452 | get_Dest_SN(tile_bundle_bits num) | ||
453 | { | ||
454 | const unsigned int n = (unsigned int)num; | ||
455 | return (((n >> 2)) & 0x3); | ||
456 | } | ||
457 | |||
458 | static __inline unsigned int | ||
459 | get_Dest_X0(tile_bundle_bits num) | ||
460 | { | ||
461 | const unsigned int n = (unsigned int)num; | ||
462 | return (((n >> 0)) & 0x3f); | ||
463 | } | ||
464 | |||
465 | static __inline unsigned int | ||
466 | get_Dest_X1(tile_bundle_bits n) | ||
467 | { | ||
468 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
469 | } | ||
470 | |||
471 | static __inline unsigned int | ||
472 | get_Dest_Y0(tile_bundle_bits num) | ||
473 | { | ||
474 | const unsigned int n = (unsigned int)num; | ||
475 | return (((n >> 0)) & 0x3f); | ||
476 | } | ||
477 | |||
478 | static __inline unsigned int | ||
479 | get_Dest_Y1(tile_bundle_bits n) | ||
480 | { | ||
481 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
482 | } | ||
483 | |||
484 | static __inline unsigned int | ||
485 | get_Imm16_X0(tile_bundle_bits num) | ||
486 | { | ||
487 | const unsigned int n = (unsigned int)num; | ||
488 | return (((n >> 12)) & 0xffff); | ||
489 | } | ||
490 | |||
491 | static __inline unsigned int | ||
492 | get_Imm16_X1(tile_bundle_bits n) | ||
493 | { | ||
494 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
495 | } | ||
496 | |||
497 | static __inline unsigned int | ||
498 | get_Imm8_SN(tile_bundle_bits num) | ||
499 | { | ||
500 | const unsigned int n = (unsigned int)num; | ||
501 | return (((n >> 0)) & 0xff); | ||
502 | } | ||
503 | |||
504 | static __inline unsigned int | ||
505 | get_Imm8_X0(tile_bundle_bits num) | ||
506 | { | ||
507 | const unsigned int n = (unsigned int)num; | ||
508 | return (((n >> 12)) & 0xff); | ||
509 | } | ||
510 | |||
511 | static __inline unsigned int | ||
512 | get_Imm8_X1(tile_bundle_bits n) | ||
513 | { | ||
514 | return (((unsigned int)(n >> 43)) & 0xff); | ||
515 | } | ||
516 | |||
517 | static __inline unsigned int | ||
518 | get_Imm8_Y0(tile_bundle_bits num) | ||
519 | { | ||
520 | const unsigned int n = (unsigned int)num; | ||
521 | return (((n >> 12)) & 0xff); | ||
522 | } | ||
523 | |||
524 | static __inline unsigned int | ||
525 | get_Imm8_Y1(tile_bundle_bits n) | ||
526 | { | ||
527 | return (((unsigned int)(n >> 43)) & 0xff); | ||
528 | } | ||
529 | |||
530 | static __inline unsigned int | ||
531 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
532 | { | ||
533 | const unsigned int n = (unsigned int)num; | ||
534 | return (((n >> 20)) & 0x7f); | ||
535 | } | ||
536 | |||
537 | static __inline unsigned int | ||
538 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
539 | { | ||
540 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
541 | } | ||
542 | |||
543 | static __inline unsigned int | ||
544 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
545 | { | ||
546 | const unsigned int n = (unsigned int)num; | ||
547 | return (((n >> 8)) & 0x3); | ||
548 | } | ||
549 | |||
550 | static __inline unsigned int | ||
551 | get_JOffLong_X1(tile_bundle_bits n) | ||
552 | { | ||
553 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
554 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
555 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
556 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
557 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_JOff_X1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
564 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
565 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
566 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
567 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
568 | } | ||
569 | |||
570 | static __inline unsigned int | ||
571 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
572 | { | ||
573 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
574 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
575 | } | ||
576 | |||
577 | static __inline unsigned int | ||
578 | get_MMEnd_X0(tile_bundle_bits num) | ||
579 | { | ||
580 | const unsigned int n = (unsigned int)num; | ||
581 | return (((n >> 18)) & 0x1f); | ||
582 | } | ||
583 | |||
584 | static __inline unsigned int | ||
585 | get_MMEnd_X1(tile_bundle_bits n) | ||
586 | { | ||
587 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
588 | } | ||
589 | |||
590 | static __inline unsigned int | ||
591 | get_MMStart_X0(tile_bundle_bits num) | ||
592 | { | ||
593 | const unsigned int n = (unsigned int)num; | ||
594 | return (((n >> 23)) & 0x1f); | ||
595 | } | ||
596 | |||
597 | static __inline unsigned int | ||
598 | get_MMStart_X1(tile_bundle_bits n) | ||
599 | { | ||
600 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
601 | } | ||
602 | |||
603 | static __inline unsigned int | ||
604 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
605 | { | ||
606 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
607 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
608 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
609 | } | ||
610 | |||
611 | static __inline unsigned int | ||
612 | get_Mode(tile_bundle_bits n) | ||
613 | { | ||
614 | return (((unsigned int)(n >> 63)) & 0x1); | ||
615 | } | ||
616 | |||
617 | static __inline unsigned int | ||
618 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
619 | { | ||
620 | const unsigned int n = (unsigned int)num; | ||
621 | return (((n >> 0)) & 0xf); | ||
622 | } | ||
623 | |||
624 | static __inline unsigned int | ||
625 | get_Opcode_SN(tile_bundle_bits num) | ||
626 | { | ||
627 | const unsigned int n = (unsigned int)num; | ||
628 | return (((n >> 10)) & 0x3f); | ||
629 | } | ||
630 | |||
631 | static __inline unsigned int | ||
632 | get_Opcode_X0(tile_bundle_bits num) | ||
633 | { | ||
634 | const unsigned int n = (unsigned int)num; | ||
635 | return (((n >> 28)) & 0x7); | ||
636 | } | ||
637 | |||
638 | static __inline unsigned int | ||
639 | get_Opcode_X1(tile_bundle_bits n) | ||
640 | { | ||
641 | return (((unsigned int)(n >> 59)) & 0xf); | ||
642 | } | ||
643 | |||
644 | static __inline unsigned int | ||
645 | get_Opcode_Y0(tile_bundle_bits num) | ||
646 | { | ||
647 | const unsigned int n = (unsigned int)num; | ||
648 | return (((n >> 27)) & 0xf); | ||
649 | } | ||
650 | |||
651 | static __inline unsigned int | ||
652 | get_Opcode_Y1(tile_bundle_bits n) | ||
653 | { | ||
654 | return (((unsigned int)(n >> 59)) & 0xf); | ||
655 | } | ||
656 | |||
657 | static __inline unsigned int | ||
658 | get_Opcode_Y2(tile_bundle_bits n) | ||
659 | { | ||
660 | return (((unsigned int)(n >> 56)) & 0x7); | ||
661 | } | ||
662 | |||
663 | static __inline unsigned int | ||
664 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
665 | { | ||
666 | const unsigned int n = (unsigned int)num; | ||
667 | return (((n >> 4)) & 0xf); | ||
668 | } | ||
669 | |||
670 | static __inline unsigned int | ||
671 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
672 | { | ||
673 | const unsigned int n = (unsigned int)num; | ||
674 | return (((n >> 18)) & 0x1ff); | ||
675 | } | ||
676 | |||
677 | static __inline unsigned int | ||
678 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
679 | { | ||
680 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
681 | } | ||
682 | |||
683 | static __inline unsigned int | ||
684 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
685 | { | ||
686 | const unsigned int n = (unsigned int)num; | ||
687 | return (((n >> 18)) & 0x3); | ||
688 | } | ||
689 | |||
690 | static __inline unsigned int | ||
691 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
692 | { | ||
693 | return (((unsigned int)(n >> 49)) & 0x3); | ||
694 | } | ||
695 | |||
696 | static __inline unsigned int | ||
697 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
698 | { | ||
699 | const unsigned int n = (unsigned int)num; | ||
700 | return (((n >> 0)) & 0x3ff); | ||
701 | } | ||
702 | |||
703 | static __inline unsigned int | ||
704 | get_S_X0(tile_bundle_bits num) | ||
705 | { | ||
706 | const unsigned int n = (unsigned int)num; | ||
707 | return (((n >> 27)) & 0x1); | ||
708 | } | ||
709 | |||
710 | static __inline unsigned int | ||
711 | get_S_X1(tile_bundle_bits n) | ||
712 | { | ||
713 | return (((unsigned int)(n >> 58)) & 0x1); | ||
714 | } | ||
715 | |||
716 | static __inline unsigned int | ||
717 | get_ShAmt_X0(tile_bundle_bits num) | ||
718 | { | ||
719 | const unsigned int n = (unsigned int)num; | ||
720 | return (((n >> 12)) & 0x1f); | ||
721 | } | ||
722 | |||
723 | static __inline unsigned int | ||
724 | get_ShAmt_X1(tile_bundle_bits n) | ||
725 | { | ||
726 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
727 | } | ||
728 | |||
729 | static __inline unsigned int | ||
730 | get_ShAmt_Y0(tile_bundle_bits num) | ||
731 | { | ||
732 | const unsigned int n = (unsigned int)num; | ||
733 | return (((n >> 12)) & 0x1f); | ||
734 | } | ||
735 | |||
736 | static __inline unsigned int | ||
737 | get_ShAmt_Y1(tile_bundle_bits n) | ||
738 | { | ||
739 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
740 | } | ||
741 | |||
742 | static __inline unsigned int | ||
743 | get_SrcA_X0(tile_bundle_bits num) | ||
744 | { | ||
745 | const unsigned int n = (unsigned int)num; | ||
746 | return (((n >> 6)) & 0x3f); | ||
747 | } | ||
748 | |||
749 | static __inline unsigned int | ||
750 | get_SrcA_X1(tile_bundle_bits n) | ||
751 | { | ||
752 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
753 | } | ||
754 | |||
755 | static __inline unsigned int | ||
756 | get_SrcA_Y0(tile_bundle_bits num) | ||
757 | { | ||
758 | const unsigned int n = (unsigned int)num; | ||
759 | return (((n >> 6)) & 0x3f); | ||
760 | } | ||
761 | |||
762 | static __inline unsigned int | ||
763 | get_SrcA_Y1(tile_bundle_bits n) | ||
764 | { | ||
765 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
766 | } | ||
767 | |||
768 | static __inline unsigned int | ||
769 | get_SrcA_Y2(tile_bundle_bits n) | ||
770 | { | ||
771 | return (((n >> 26)) & 0x00000001) | | ||
772 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
773 | } | ||
774 | |||
775 | static __inline unsigned int | ||
776 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
777 | { | ||
778 | const unsigned int n = (unsigned int)num; | ||
779 | return (((n >> 20)) & 0x3f); | ||
780 | } | ||
781 | |||
782 | static __inline unsigned int | ||
783 | get_SrcB_X0(tile_bundle_bits num) | ||
784 | { | ||
785 | const unsigned int n = (unsigned int)num; | ||
786 | return (((n >> 12)) & 0x3f); | ||
787 | } | ||
788 | |||
789 | static __inline unsigned int | ||
790 | get_SrcB_X1(tile_bundle_bits n) | ||
791 | { | ||
792 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
793 | } | ||
794 | |||
795 | static __inline unsigned int | ||
796 | get_SrcB_Y0(tile_bundle_bits num) | ||
797 | { | ||
798 | const unsigned int n = (unsigned int)num; | ||
799 | return (((n >> 12)) & 0x3f); | ||
800 | } | ||
801 | |||
802 | static __inline unsigned int | ||
803 | get_SrcB_Y1(tile_bundle_bits n) | ||
804 | { | ||
805 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
806 | } | ||
807 | |||
808 | static __inline unsigned int | ||
809 | get_Src_SN(tile_bundle_bits num) | ||
810 | { | ||
811 | const unsigned int n = (unsigned int)num; | ||
812 | return (((n >> 0)) & 0x3); | ||
813 | } | ||
814 | |||
815 | static __inline unsigned int | ||
816 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
817 | { | ||
818 | const unsigned int n = (unsigned int)num; | ||
819 | return (((n >> 12)) & 0x1f); | ||
820 | } | ||
821 | |||
822 | static __inline unsigned int | ||
823 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
824 | { | ||
825 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
826 | } | ||
827 | |||
828 | static __inline unsigned int | ||
829 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
830 | { | ||
831 | const unsigned int n = (unsigned int)num; | ||
832 | return (((n >> 12)) & 0x1f); | ||
833 | } | ||
834 | |||
835 | static __inline unsigned int | ||
836 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
837 | { | ||
838 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
839 | } | ||
840 | |||
841 | static __inline unsigned int | ||
842 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
843 | { | ||
844 | const unsigned int n = (unsigned int)num; | ||
845 | return (((n >> 17)) & 0x3ff); | ||
846 | } | ||
847 | |||
848 | static __inline unsigned int | ||
849 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
850 | { | ||
851 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
852 | } | ||
853 | |||
854 | static __inline unsigned int | ||
855 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
856 | { | ||
857 | const unsigned int n = (unsigned int)num; | ||
858 | return (((n >> 17)) & 0x7); | ||
859 | } | ||
860 | |||
861 | static __inline unsigned int | ||
862 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
863 | { | ||
864 | return (((unsigned int)(n >> 48)) & 0x7); | ||
865 | } | ||
866 | |||
867 | |||
868 | static __inline int | ||
869 | sign_extend(int n, int num_bits) | ||
870 | { | ||
871 | int shift = (int)(sizeof(int) * 8 - num_bits); | ||
872 | return (n << shift) >> shift; | ||
873 | } | ||
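/*
 * Illustrative sketch, not part of the original header: sign_extend()
 * shifts the field up to the top of an int and relies on an arithmetic
 * right shift to replicate the sign bit back down, as the kernel does
 * elsewhere.  The helper name below is hypothetical, added only to show
 * the arithmetic.
 */
static __inline void sign_extend_example(void)
{
	/* A 5-bit field holding 0x1f is the two's-complement value -1. */
	int negative_one = sign_extend(0x1f, 5);	/* == -1 */
	/* 0x0f has its top (5th) bit clear, so it stays +15. */
	int fifteen = sign_extend(0x0f, 5);		/* == 15 */
	(void)negative_one;
	(void)fifteen;
}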
874 | |||
875 | |||
876 | |||
877 | static __inline tile_bundle_bits | ||
878 | create_BrOff_SN(int num) | ||
879 | { | ||
880 | const unsigned int n = (unsigned int)num; | ||
881 | return ((n & 0x3ff) << 0); | ||
882 | } | ||
883 | |||
884 | static __inline tile_bundle_bits | ||
885 | create_BrOff_X1(int num) | ||
886 | { | ||
887 | const unsigned int n = (unsigned int)num; | ||
888 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
889 | (((tile_bundle_bits)(n & 0x00018000)) << 20); | ||
890 | } | ||
891 | |||
892 | static __inline tile_bundle_bits | ||
893 | create_BrType_X1(int num) | ||
894 | { | ||
895 | const unsigned int n = (unsigned int)num; | ||
896 | return (((tile_bundle_bits)(n & 0xf)) << 31); | ||
897 | } | ||
898 | |||
899 | static __inline tile_bundle_bits | ||
900 | create_Dest_Imm8_X1(int num) | ||
901 | { | ||
902 | const unsigned int n = (unsigned int)num; | ||
903 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
904 | (((tile_bundle_bits)(n & 0x000000c0)) << 43); | ||
905 | } | ||
906 | |||
907 | static __inline tile_bundle_bits | ||
908 | create_Dest_SN(int num) | ||
909 | { | ||
910 | const unsigned int n = (unsigned int)num; | ||
911 | return ((n & 0x3) << 2); | ||
912 | } | ||
913 | |||
914 | static __inline tile_bundle_bits | ||
915 | create_Dest_X0(int num) | ||
916 | { | ||
917 | const unsigned int n = (unsigned int)num; | ||
918 | return ((n & 0x3f) << 0); | ||
919 | } | ||
920 | |||
921 | static __inline tile_bundle_bits | ||
922 | create_Dest_X1(int num) | ||
923 | { | ||
924 | const unsigned int n = (unsigned int)num; | ||
925 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
926 | } | ||
927 | |||
928 | static __inline tile_bundle_bits | ||
929 | create_Dest_Y0(int num) | ||
930 | { | ||
931 | const unsigned int n = (unsigned int)num; | ||
932 | return ((n & 0x3f) << 0); | ||
933 | } | ||
934 | |||
935 | static __inline tile_bundle_bits | ||
936 | create_Dest_Y1(int num) | ||
937 | { | ||
938 | const unsigned int n = (unsigned int)num; | ||
939 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
940 | } | ||
941 | |||
942 | static __inline tile_bundle_bits | ||
943 | create_Imm16_X0(int num) | ||
944 | { | ||
945 | const unsigned int n = (unsigned int)num; | ||
946 | return ((n & 0xffff) << 12); | ||
947 | } | ||
948 | |||
949 | static __inline tile_bundle_bits | ||
950 | create_Imm16_X1(int num) | ||
951 | { | ||
952 | const unsigned int n = (unsigned int)num; | ||
953 | return (((tile_bundle_bits)(n & 0xffff)) << 43); | ||
954 | } | ||
955 | |||
956 | static __inline tile_bundle_bits | ||
957 | create_Imm8_SN(int num) | ||
958 | { | ||
959 | const unsigned int n = (unsigned int)num; | ||
960 | return ((n & 0xff) << 0); | ||
961 | } | ||
962 | |||
963 | static __inline tile_bundle_bits | ||
964 | create_Imm8_X0(int num) | ||
965 | { | ||
966 | const unsigned int n = (unsigned int)num; | ||
967 | return ((n & 0xff) << 12); | ||
968 | } | ||
969 | |||
970 | static __inline tile_bundle_bits | ||
971 | create_Imm8_X1(int num) | ||
972 | { | ||
973 | const unsigned int n = (unsigned int)num; | ||
974 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
975 | } | ||
976 | |||
977 | static __inline tile_bundle_bits | ||
978 | create_Imm8_Y0(int num) | ||
979 | { | ||
980 | const unsigned int n = (unsigned int)num; | ||
981 | return ((n & 0xff) << 12); | ||
982 | } | ||
983 | |||
984 | static __inline tile_bundle_bits | ||
985 | create_Imm8_Y1(int num) | ||
986 | { | ||
987 | const unsigned int n = (unsigned int)num; | ||
988 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
989 | } | ||
990 | |||
991 | static __inline tile_bundle_bits | ||
992 | create_ImmOpcodeExtension_X0(int num) | ||
993 | { | ||
994 | const unsigned int n = (unsigned int)num; | ||
995 | return ((n & 0x7f) << 20); | ||
996 | } | ||
997 | |||
998 | static __inline tile_bundle_bits | ||
999 | create_ImmOpcodeExtension_X1(int num) | ||
1000 | { | ||
1001 | const unsigned int n = (unsigned int)num; | ||
1002 | return (((tile_bundle_bits)(n & 0x7f)) << 51); | ||
1003 | } | ||
1004 | |||
1005 | static __inline tile_bundle_bits | ||
1006 | create_ImmRROpcodeExtension_SN(int num) | ||
1007 | { | ||
1008 | const unsigned int n = (unsigned int)num; | ||
1009 | return ((n & 0x3) << 8); | ||
1010 | } | ||
1011 | |||
1012 | static __inline tile_bundle_bits | ||
1013 | create_JOffLong_X1(int num) | ||
1014 | { | ||
1015 | const unsigned int n = (unsigned int)num; | ||
1016 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1017 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1018 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1019 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1020 | (((tile_bundle_bits)(n & 0x18000000)) << 31); | ||
1021 | } | ||
1022 | |||
1023 | static __inline tile_bundle_bits | ||
1024 | create_JOff_X1(int num) | ||
1025 | { | ||
1026 | const unsigned int n = (unsigned int)num; | ||
1027 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1028 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1029 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1030 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1031 | (((tile_bundle_bits)(n & 0x08000000)) << 31); | ||
1032 | } | ||
1033 | |||
1034 | static __inline tile_bundle_bits | ||
1035 | create_MF_Imm15_X1(int num) | ||
1036 | { | ||
1037 | const unsigned int n = (unsigned int)num; | ||
1038 | return (((tile_bundle_bits)(n & 0x00003fff)) << 37) | | ||
1039 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1040 | } | ||
1041 | |||
1042 | static __inline tile_bundle_bits | ||
1043 | create_MMEnd_X0(int num) | ||
1044 | { | ||
1045 | const unsigned int n = (unsigned int)num; | ||
1046 | return ((n & 0x1f) << 18); | ||
1047 | } | ||
1048 | |||
1049 | static __inline tile_bundle_bits | ||
1050 | create_MMEnd_X1(int num) | ||
1051 | { | ||
1052 | const unsigned int n = (unsigned int)num; | ||
1053 | return (((tile_bundle_bits)(n & 0x1f)) << 49); | ||
1054 | } | ||
1055 | |||
1056 | static __inline tile_bundle_bits | ||
1057 | create_MMStart_X0(int num) | ||
1058 | { | ||
1059 | const unsigned int n = (unsigned int)num; | ||
1060 | return ((n & 0x1f) << 23); | ||
1061 | } | ||
1062 | |||
1063 | static __inline tile_bundle_bits | ||
1064 | create_MMStart_X1(int num) | ||
1065 | { | ||
1066 | const unsigned int n = (unsigned int)num; | ||
1067 | return (((tile_bundle_bits)(n & 0x1f)) << 54); | ||
1068 | } | ||
1069 | |||
1070 | static __inline tile_bundle_bits | ||
1071 | create_MT_Imm15_X1(int num) | ||
1072 | { | ||
1073 | const unsigned int n = (unsigned int)num; | ||
1074 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
1075 | (((tile_bundle_bits)(n & 0x00003fc0)) << 37) | | ||
1076 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1077 | } | ||
1078 | |||
1079 | static __inline tile_bundle_bits | ||
1080 | create_Mode(int num) | ||
1081 | { | ||
1082 | const unsigned int n = (unsigned int)num; | ||
1083 | return (((tile_bundle_bits)(n & 0x1)) << 63); | ||
1084 | } | ||
1085 | |||
1086 | static __inline tile_bundle_bits | ||
1087 | create_NoRegOpcodeExtension_SN(int num) | ||
1088 | { | ||
1089 | const unsigned int n = (unsigned int)num; | ||
1090 | return ((n & 0xf) << 0); | ||
1091 | } | ||
1092 | |||
1093 | static __inline tile_bundle_bits | ||
1094 | create_Opcode_SN(int num) | ||
1095 | { | ||
1096 | const unsigned int n = (unsigned int)num; | ||
1097 | return ((n & 0x3f) << 10); | ||
1098 | } | ||
1099 | |||
1100 | static __inline tile_bundle_bits | ||
1101 | create_Opcode_X0(int num) | ||
1102 | { | ||
1103 | const unsigned int n = (unsigned int)num; | ||
1104 | return ((n & 0x7) << 28); | ||
1105 | } | ||
1106 | |||
1107 | static __inline tile_bundle_bits | ||
1108 | create_Opcode_X1(int num) | ||
1109 | { | ||
1110 | const unsigned int n = (unsigned int)num; | ||
1111 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1112 | } | ||
1113 | |||
1114 | static __inline tile_bundle_bits | ||
1115 | create_Opcode_Y0(int num) | ||
1116 | { | ||
1117 | const unsigned int n = (unsigned int)num; | ||
1118 | return ((n & 0xf) << 27); | ||
1119 | } | ||
1120 | |||
1121 | static __inline tile_bundle_bits | ||
1122 | create_Opcode_Y1(int num) | ||
1123 | { | ||
1124 | const unsigned int n = (unsigned int)num; | ||
1125 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1126 | } | ||
1127 | |||
1128 | static __inline tile_bundle_bits | ||
1129 | create_Opcode_Y2(int num) | ||
1130 | { | ||
1131 | const unsigned int n = (unsigned int)num; | ||
1132 | return (((tile_bundle_bits)(n & 0x7)) << 56); | ||
1133 | } | ||
1134 | |||
1135 | static __inline tile_bundle_bits | ||
1136 | create_RROpcodeExtension_SN(int num) | ||
1137 | { | ||
1138 | const unsigned int n = (unsigned int)num; | ||
1139 | return ((n & 0xf) << 4); | ||
1140 | } | ||
1141 | |||
1142 | static __inline tile_bundle_bits | ||
1143 | create_RRROpcodeExtension_X0(int num) | ||
1144 | { | ||
1145 | const unsigned int n = (unsigned int)num; | ||
1146 | return ((n & 0x1ff) << 18); | ||
1147 | } | ||
1148 | |||
1149 | static __inline tile_bundle_bits | ||
1150 | create_RRROpcodeExtension_X1(int num) | ||
1151 | { | ||
1152 | const unsigned int n = (unsigned int)num; | ||
1153 | return (((tile_bundle_bits)(n & 0x1ff)) << 49); | ||
1154 | } | ||
1155 | |||
1156 | static __inline tile_bundle_bits | ||
1157 | create_RRROpcodeExtension_Y0(int num) | ||
1158 | { | ||
1159 | const unsigned int n = (unsigned int)num; | ||
1160 | return ((n & 0x3) << 18); | ||
1161 | } | ||
1162 | |||
1163 | static __inline tile_bundle_bits | ||
1164 | create_RRROpcodeExtension_Y1(int num) | ||
1165 | { | ||
1166 | const unsigned int n = (unsigned int)num; | ||
1167 | return (((tile_bundle_bits)(n & 0x3)) << 49); | ||
1168 | } | ||
1169 | |||
1170 | static __inline tile_bundle_bits | ||
1171 | create_RouteOpcodeExtension_SN(int num) | ||
1172 | { | ||
1173 | const unsigned int n = (unsigned int)num; | ||
1174 | return ((n & 0x3ff) << 0); | ||
1175 | } | ||
1176 | |||
1177 | static __inline tile_bundle_bits | ||
1178 | create_S_X0(int num) | ||
1179 | { | ||
1180 | const unsigned int n = (unsigned int)num; | ||
1181 | return ((n & 0x1) << 27); | ||
1182 | } | ||
1183 | |||
1184 | static __inline tile_bundle_bits | ||
1185 | create_S_X1(int num) | ||
1186 | { | ||
1187 | const unsigned int n = (unsigned int)num; | ||
1188 | return (((tile_bundle_bits)(n & 0x1)) << 58); | ||
1189 | } | ||
1190 | |||
1191 | static __inline tile_bundle_bits | ||
1192 | create_ShAmt_X0(int num) | ||
1193 | { | ||
1194 | const unsigned int n = (unsigned int)num; | ||
1195 | return ((n & 0x1f) << 12); | ||
1196 | } | ||
1197 | |||
1198 | static __inline tile_bundle_bits | ||
1199 | create_ShAmt_X1(int num) | ||
1200 | { | ||
1201 | const unsigned int n = (unsigned int)num; | ||
1202 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1203 | } | ||
1204 | |||
1205 | static __inline tile_bundle_bits | ||
1206 | create_ShAmt_Y0(int num) | ||
1207 | { | ||
1208 | const unsigned int n = (unsigned int)num; | ||
1209 | return ((n & 0x1f) << 12); | ||
1210 | } | ||
1211 | |||
1212 | static __inline tile_bundle_bits | ||
1213 | create_ShAmt_Y1(int num) | ||
1214 | { | ||
1215 | const unsigned int n = (unsigned int)num; | ||
1216 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1217 | } | ||
1218 | |||
1219 | static __inline tile_bundle_bits | ||
1220 | create_SrcA_X0(int num) | ||
1221 | { | ||
1222 | const unsigned int n = (unsigned int)num; | ||
1223 | return ((n & 0x3f) << 6); | ||
1224 | } | ||
1225 | |||
1226 | static __inline tile_bundle_bits | ||
1227 | create_SrcA_X1(int num) | ||
1228 | { | ||
1229 | const unsigned int n = (unsigned int)num; | ||
1230 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1231 | } | ||
1232 | |||
1233 | static __inline tile_bundle_bits | ||
1234 | create_SrcA_Y0(int num) | ||
1235 | { | ||
1236 | const unsigned int n = (unsigned int)num; | ||
1237 | return ((n & 0x3f) << 6); | ||
1238 | } | ||
1239 | |||
1240 | static __inline tile_bundle_bits | ||
1241 | create_SrcA_Y1(int num) | ||
1242 | { | ||
1243 | const unsigned int n = (unsigned int)num; | ||
1244 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1245 | } | ||
1246 | |||
1247 | static __inline tile_bundle_bits | ||
1248 | create_SrcA_Y2(int num) | ||
1249 | { | ||
1250 | const unsigned int n = (unsigned int)num; | ||
1251 | return ((n & 0x00000001) << 26) | | ||
1252 | (((tile_bundle_bits)(n & 0x0000003e)) << 50); | ||
1253 | } | ||
1254 | |||
1255 | static __inline tile_bundle_bits | ||
1256 | create_SrcBDest_Y2(int num) | ||
1257 | { | ||
1258 | const unsigned int n = (unsigned int)num; | ||
1259 | return ((n & 0x3f) << 20); | ||
1260 | } | ||
1261 | |||
1262 | static __inline tile_bundle_bits | ||
1263 | create_SrcB_X0(int num) | ||
1264 | { | ||
1265 | const unsigned int n = (unsigned int)num; | ||
1266 | return ((n & 0x3f) << 12); | ||
1267 | } | ||
1268 | |||
1269 | static __inline tile_bundle_bits | ||
1270 | create_SrcB_X1(int num) | ||
1271 | { | ||
1272 | const unsigned int n = (unsigned int)num; | ||
1273 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1274 | } | ||
1275 | |||
1276 | static __inline tile_bundle_bits | ||
1277 | create_SrcB_Y0(int num) | ||
1278 | { | ||
1279 | const unsigned int n = (unsigned int)num; | ||
1280 | return ((n & 0x3f) << 12); | ||
1281 | } | ||
1282 | |||
1283 | static __inline tile_bundle_bits | ||
1284 | create_SrcB_Y1(int num) | ||
1285 | { | ||
1286 | const unsigned int n = (unsigned int)num; | ||
1287 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1288 | } | ||
1289 | |||
1290 | static __inline tile_bundle_bits | ||
1291 | create_Src_SN(int num) | ||
1292 | { | ||
1293 | const unsigned int n = (unsigned int)num; | ||
1294 | return ((n & 0x3) << 0); | ||
1295 | } | ||
1296 | |||
1297 | static __inline tile_bundle_bits | ||
1298 | create_UnOpcodeExtension_X0(int num) | ||
1299 | { | ||
1300 | const unsigned int n = (unsigned int)num; | ||
1301 | return ((n & 0x1f) << 12); | ||
1302 | } | ||
1303 | |||
1304 | static __inline tile_bundle_bits | ||
1305 | create_UnOpcodeExtension_X1(int num) | ||
1306 | { | ||
1307 | const unsigned int n = (unsigned int)num; | ||
1308 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1309 | } | ||
1310 | |||
1311 | static __inline tile_bundle_bits | ||
1312 | create_UnOpcodeExtension_Y0(int num) | ||
1313 | { | ||
1314 | const unsigned int n = (unsigned int)num; | ||
1315 | return ((n & 0x1f) << 12); | ||
1316 | } | ||
1317 | |||
1318 | static __inline tile_bundle_bits | ||
1319 | create_UnOpcodeExtension_Y1(int num) | ||
1320 | { | ||
1321 | const unsigned int n = (unsigned int)num; | ||
1322 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1323 | } | ||
1324 | |||
1325 | static __inline tile_bundle_bits | ||
1326 | create_UnShOpcodeExtension_X0(int num) | ||
1327 | { | ||
1328 | const unsigned int n = (unsigned int)num; | ||
1329 | return ((n & 0x3ff) << 17); | ||
1330 | } | ||
1331 | |||
1332 | static __inline tile_bundle_bits | ||
1333 | create_UnShOpcodeExtension_X1(int num) | ||
1334 | { | ||
1335 | const unsigned int n = (unsigned int)num; | ||
1336 | return (((tile_bundle_bits)(n & 0x3ff)) << 48); | ||
1337 | } | ||
1338 | |||
1339 | static __inline tile_bundle_bits | ||
1340 | create_UnShOpcodeExtension_Y0(int num) | ||
1341 | { | ||
1342 | const unsigned int n = (unsigned int)num; | ||
1343 | return ((n & 0x7) << 17); | ||
1344 | } | ||
1345 | |||
1346 | static __inline tile_bundle_bits | ||
1347 | create_UnShOpcodeExtension_Y1(int num) | ||
1348 | { | ||
1349 | const unsigned int n = (unsigned int)num; | ||
1350 | return (((tile_bundle_bits)(n & 0x7)) << 48); | ||
1351 | } | ||
1352 | |||
1353 | |||
1354 | |||
1355 | typedef enum | ||
1356 | { | ||
1357 | TILE_PIPELINE_X0, | ||
1358 | TILE_PIPELINE_X1, | ||
1359 | TILE_PIPELINE_Y0, | ||
1360 | TILE_PIPELINE_Y1, | ||
1361 | TILE_PIPELINE_Y2, | ||
1362 | } tile_pipeline; | ||
1363 | |||
1364 | #define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) | ||
1365 | |||
1366 | typedef enum | ||
1367 | { | ||
1368 | TILE_OP_TYPE_REGISTER, | ||
1369 | TILE_OP_TYPE_IMMEDIATE, | ||
1370 | TILE_OP_TYPE_ADDRESS, | ||
1371 | TILE_OP_TYPE_SPR | ||
1372 | } tile_operand_type; | ||
1373 | |||
1374 | /* This is the bit that determines if a bundle is in the Y encoding. */ | ||
1375 | #define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) | ||
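/*
 * Illustrative sketch, not part of the original header: a bundle can be
 * classified as X- or Y-encoded by testing this bit, which is the same
 * bit that get_Mode() above extracts.  The helper name is hypothetical.
 */
static __inline int tile_bundle_is_y_encoded_example(tile_bundle_bits bundle)
{
	return (bundle & TILE_BUNDLE_Y_ENCODING_MASK) != 0;
}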
1376 | |||
1377 | enum | ||
1378 | { | ||
1379 | /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ | ||
1380 | TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, | ||
1381 | |||
1382 | /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ | ||
1383 | TILE_NUM_PIPELINE_ENCODINGS = 5, | ||
1384 | |||
1385 | /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ | ||
1386 | TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, | ||
1387 | |||
1388 | /* Instructions take this many bytes. */ | ||
1389 | TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, | ||
1390 | |||
1391 | /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ | ||
1392 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, | ||
1393 | |||
1394 | /* Bundles should be aligned modulo this number of bytes. */ | ||
1395 | TILE_BUNDLE_ALIGNMENT_IN_BYTES = | ||
1396 | (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), | ||
1397 | |||
1398 | /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */ | ||
1399 | TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1, | ||
1400 | |||
1401 | /* Static network instructions take this many bytes. */ | ||
1402 | TILE_SN_INSTRUCTION_SIZE_IN_BYTES = | ||
1403 | (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES), | ||
1404 | |||
1405 | /* Number of registers (some are magic, such as network I/O). */ | ||
1406 | TILE_NUM_REGISTERS = 64, | ||
1407 | |||
1408 | /* Number of static network registers. */ | ||
1409 | TILE_NUM_SN_REGISTERS = 4 | ||
1410 | }; | ||
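/*
 * Illustrative sketch, not part of the original header: bundles are
 * 8 bytes (1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES) and must be aligned to
 * TILE_BUNDLE_ALIGNMENT_IN_BYTES, so stepping a PC to the next bundle
 * and checking alignment could look like the hypothetical helper below.
 */
static __inline unsigned int tile_next_bundle_pc_example(unsigned int pc)
{
	/* Aligned means the low log2(alignment) bits of the PC are zero. */
	int aligned = (pc & (TILE_BUNDLE_ALIGNMENT_IN_BYTES - 1)) == 0;
	(void)aligned;
	return pc + TILE_BUNDLE_SIZE_IN_BYTES;
}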
1411 | |||
1412 | |||
1413 | struct tile_operand | ||
1414 | { | ||
1415 | /* Is this operand a register, immediate or address? */ | ||
1416 | tile_operand_type type; | ||
1417 | |||
1418 | /* The default relocation type for this operand. */ | ||
1419 | signed int default_reloc : 16; | ||
1420 | |||
1421 | /* How many bits is this value? (used for range checking) */ | ||
1422 | unsigned int num_bits : 5; | ||
1423 | |||
1424 | /* Is the value signed? (used for range checking) */ | ||
1425 | unsigned int is_signed : 1; | ||
1426 | |||
1427 | /* Is this operand a source register? */ | ||
1428 | unsigned int is_src_reg : 1; | ||
1429 | |||
1430 | /* Is this operand written? (i.e. is it a destination register) */ | ||
1431 | unsigned int is_dest_reg : 1; | ||
1432 | |||
1433 | /* Is this operand PC-relative? */ | ||
1434 | unsigned int is_pc_relative : 1; | ||
1435 | |||
1436 | /* By how many bits do we right shift the value before inserting? */ | ||
1437 | unsigned int rightshift : 2; | ||
1438 | |||
1439 | /* Return the bits for this operand to be ORed into an existing bundle. */ | ||
1440 | tile_bundle_bits (*insert) (int op); | ||
1441 | |||
1442 | /* Extract this operand and return it. */ | ||
1443 | unsigned int (*extract) (tile_bundle_bits bundle); | ||
1444 | }; | ||
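/*
 * Illustrative sketch, not part of the original header: the insert()
 * and extract() hooks let generic code patch or read an operand without
 * knowing which bit-field accessors apply.  Per the comment above,
 * insert() returns bits to be ORed into an existing bundle; the helper
 * name below is hypothetical and assumes the caller has already cleared
 * the field.
 */
static __inline tile_bundle_bits
tile_operand_rewrite_example(const struct tile_operand *op,
			     tile_bundle_bits bundle, int new_value)
{
	/* Read back the old encoding (unused here, shown for symmetry). */
	unsigned int old_bits = op->extract(bundle);
	(void)old_bits;
	/* OR in the freshly encoded value. */
	return bundle | op->insert(new_value);
}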
1445 | |||
1446 | |||
1447 | extern const struct tile_operand tile_operands[]; | ||
1448 | |||
1449 | /* One finite-state machine per pipe for rapid instruction decoding. */ | ||
1450 | extern const unsigned short * const | ||
1451 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1452 | |||
1453 | |||
1454 | struct tile_opcode | ||
1455 | { | ||
1456 | /* The opcode mnemonic, e.g. "add" */ | ||
1457 | const char *name; | ||
1458 | |||
1459 | /* The enum value for this mnemonic. */ | ||
1460 | tile_mnemonic mnemonic; | ||
1461 | |||
1462 | /* A bit mask of which of the five pipes this instruction | ||
1463 | is compatible with: | ||
1464 | X0 0x01 | ||
1465 | X1 0x02 | ||
1466 | Y0 0x04 | ||
1467 | Y1 0x08 | ||
1468 | Y2 0x10 */ | ||
1469 | unsigned char pipes; | ||
1470 | |||
1471 | /* How many operands are there? */ | ||
1472 | unsigned char num_operands; | ||
1473 | |||
1474 | /* Which register does this write implicitly, or TREG_ZERO if none? */ | ||
1475 | unsigned char implicitly_written_register; | ||
1476 | |||
1477 | /* Can this be bundled with other instructions (almost always true). */ | ||
1478 | unsigned char can_bundle; | ||
1479 | |||
1480 | /* The description of the operands. Each of these is an | ||
1481 | * index into the tile_operands[] table. */ | ||
1482 | unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; | ||
1483 | |||
1484 | }; | ||
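/*
 * Illustrative sketch, not part of the original header: the pipes field
 * is a bitmask in the same order as the tile_pipeline enum (X0 = 0x01,
 * X1 = 0x02, Y0 = 0x04, Y1 = 0x08, Y2 = 0x10), so compatibility with a
 * given pipeline can be tested by shifting the enum value.  The helper
 * name is hypothetical.
 */
static __inline int
tile_opcode_runs_on_pipe_example(const struct tile_opcode *op,
				 tile_pipeline pipe)
{
	return (op->pipes & (1 << (int)pipe)) != 0;
}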
1485 | |||
1486 | extern const struct tile_opcode tile_opcodes[]; | ||
1487 | |||
1488 | |||
1489 | /* Used for non-textual disassembly into structs. */ | ||
1490 | struct tile_decoded_instruction | ||
1491 | { | ||
1492 | const struct tile_opcode *opcode; | ||
1493 | const struct tile_operand *operands[TILE_MAX_OPERANDS]; | ||
1494 | int operand_values[TILE_MAX_OPERANDS]; | ||
1495 | }; | ||
1496 | |||
1497 | |||
1498 | /* Disassemble a bundle into a struct for machine processing. */ | ||
1499 | extern int parse_insn_tile(tile_bundle_bits bits, | ||
1500 | unsigned int pc, | ||
1501 | struct tile_decoded_instruction | ||
1502 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); | ||
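/*
 * Illustrative sketch, not part of the original header: a caller
 * typically decodes a bundle into the fixed-size array and then walks
 * the decoded entries.  The return value is assumed here to be the
 * number of instructions decoded; that is an assumption about the
 * opcodes-library implementation, not something stated in this header,
 * and the helper name is hypothetical.
 */
static __inline void decode_bundle_example(tile_bundle_bits bits,
					   unsigned int pc)
{
	struct tile_decoded_instruction
		decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
	int i, num = parse_insn_tile(bits, pc, decoded);

	for (i = 0; i < num; i++) {
		/* decoded[i].opcode->name is the mnemonic, e.g. "add". */
		const char *name = decoded[i].opcode->name;
		(void)name;
	}
}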
1503 | |||
1504 | |||
1505 | |||
1506 | #endif /* opcode_tile_h */ | ||
diff --git a/arch/tile/include/asm/opcode_constants.h b/arch/tile/include/asm/opcode_constants.h new file mode 100644 index 00000000000..37a9f2958cb --- /dev/null +++ b/arch/tile/include/asm/opcode_constants.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_OPCODE_CONSTANTS_H | ||
16 | #define _ASM_TILE_OPCODE_CONSTANTS_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | #if CHIP_WORD_SIZE() == 64 | ||
21 | #include <asm/opcode_constants_64.h> | ||
22 | #else | ||
23 | #include <asm/opcode_constants_32.h> | ||
24 | #endif | ||
25 | |||
26 | #endif /* _ASM_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/opcode_constants_32.h b/arch/tile/include/asm/opcode_constants_32.h new file mode 100644 index 00000000000..227d033b180 --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_32.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h new file mode 100644 index 00000000000..227d033b180 --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_64.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
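A minimal sketch of how a decoder might test an already-extracted X1 "special 0" opcode field against these constants; the helper that extracts the field from an instruction bundle is assumed to exist elsewhere and is not part of this header.

/* Illustration only: classify an X1 "special 0" opcode field. */
static inline int x1_special_is_register_jump(unsigned int special_x1)
{
	return special_x1 == JR_SPECIAL_0_OPCODE_X1 ||
	       special_x1 == JRP_SPECIAL_0_OPCODE_X1 ||
	       special_x1 == JALR_SPECIAL_0_OPCODE_X1 ||
	       special_x1 == JALRP_SPECIAL_0_OPCODE_X1;
}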
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h new file mode 100644 index 00000000000..f894a9016da --- /dev/null +++ b/arch/tile/include/asm/page.h | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PAGE_H | ||
16 | #define _ASM_TILE_PAGE_H | ||
17 | |||
18 | #include <linux/const.h> | ||
19 | |||
20 | /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ | ||
21 | #define PAGE_SHIFT 16 | ||
22 | #define HPAGE_SHIFT 24 | ||
23 | |||
24 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | ||
25 | #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) | ||
26 | |||
27 | #define PAGE_MASK (~(PAGE_SIZE - 1)) | ||
28 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
29 | |||
30 | #ifdef __KERNEL__ | ||
31 | |||
32 | #include <hv/hypervisor.h> | ||
33 | #include <arch/chip.h> | ||
34 | |||
35 | /* | ||
36 | * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx | ||
37 | * definitions in <hv/hypervisor.h>. We validate this at build time | ||
38 | * here, and again at runtime during early boot. We provide a | ||
39 | * separate definition since userspace doesn't have <hv/hypervisor.h>. | ||
40 | * | ||
41 | * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since | ||
42 | * they are the same on i386 but not TILE. | ||
43 | */ | ||
44 | #if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT | ||
45 | # error Small page size mismatch in Linux | ||
46 | #endif | ||
47 | #if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT | ||
48 | # error Huge page size mismatch in Linux | ||
49 | #endif | ||
50 | |||
51 | #ifndef __ASSEMBLY__ | ||
52 | |||
53 | #include <linux/types.h> | ||
54 | #include <linux/string.h> | ||
55 | |||
56 | struct page; | ||
57 | |||
58 | static inline void clear_page(void *page) | ||
59 | { | ||
60 | memset(page, 0, PAGE_SIZE); | ||
61 | } | ||
62 | |||
63 | static inline void copy_page(void *to, void *from) | ||
64 | { | ||
65 | memcpy(to, from, PAGE_SIZE); | ||
66 | } | ||
67 | |||
68 | static inline void clear_user_page(void *page, unsigned long vaddr, | ||
69 | struct page *pg) | ||
70 | { | ||
71 | clear_page(page); | ||
72 | } | ||
73 | |||
74 | static inline void copy_user_page(void *to, void *from, unsigned long vaddr, | ||
75 | struct page *topage) | ||
76 | { | ||
77 | copy_page(to, from); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Hypervisor page tables are made of the same basic structure. | ||
82 | */ | ||
83 | |||
84 | typedef __u64 pteval_t; | ||
85 | typedef __u64 pmdval_t; | ||
86 | typedef __u64 pudval_t; | ||
87 | typedef __u64 pgdval_t; | ||
88 | typedef __u64 pgprotval_t; | ||
89 | |||
90 | typedef HV_PTE pte_t; | ||
91 | typedef HV_PTE pgd_t; | ||
92 | typedef HV_PTE pgprot_t; | ||
93 | |||
94 | /* | ||
95 | * User L2 page tables are managed as one L2 page table per page, | ||
96 | * because we use the page allocator for them. This keeps the allocation | ||
97 | * simple and makes it potentially useful to implement HIGHPTE at some point. | ||
98 | * However, it's also inefficient, since L2 page tables are much smaller | ||
99 | * than pages (currently 2KB vs 64KB). So we should revisit this. | ||
100 | */ | ||
101 | typedef struct page *pgtable_t; | ||
102 | |||
103 | /* Must be a macro since it is used to create constants. */ | ||
104 | #define __pgprot(val) hv_pte(val) | ||
105 | |||
106 | static inline u64 pgprot_val(pgprot_t pgprot) | ||
107 | { | ||
108 | return hv_pte_val(pgprot); | ||
109 | } | ||
110 | |||
111 | static inline u64 pte_val(pte_t pte) | ||
112 | { | ||
113 | return hv_pte_val(pte); | ||
114 | } | ||
115 | |||
116 | static inline u64 pgd_val(pgd_t pgd) | ||
117 | { | ||
118 | return hv_pte_val(pgd); | ||
119 | } | ||
120 | |||
121 | #ifdef __tilegx__ | ||
122 | |||
123 | typedef HV_PTE pmd_t; | ||
124 | |||
125 | static inline u64 pmd_val(pmd_t pmd) | ||
126 | { | ||
127 | return hv_pte_val(pmd); | ||
128 | } | ||
129 | |||
130 | #endif | ||
131 | |||
132 | #endif /* !__ASSEMBLY__ */ | ||
133 | |||
134 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
135 | |||
136 | #define HUGE_MAX_HSTATE 2 | ||
137 | |||
138 | #ifdef CONFIG_HUGETLB_PAGE | ||
139 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
140 | #endif | ||
141 | |||
142 | /* Each memory controller has PAs distinct in their high bits. */ | ||
143 | #define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS()) | ||
144 | #define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS()) | ||
145 | #define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT) | ||
146 | #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)) | ||
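A worked example of the high-bits arithmetic, under illustrative chip parameters that are assumptions here rather than values taken from <arch/chip.h>:

/*
 * Assuming CHIP_PA_WIDTH() == 36 and CHIP_LOG_NUM_MSHIMS() == 2:
 *   NR_PA_HIGHBIT_SHIFT == 34 and NR_PA_HIGHBIT_VALUES == 4, so the top
 *   two PA bits select one of four memory controllers;
 *   __pa_to_highbits(0x900000000ULL) == 2 (PA bits 35..34 are 0b10);
 *   __pfn_to_highbits() shifts by 34 - 16 == 18 bits, since PAGE_SHIFT is 16.
 */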
147 | |||
148 | #ifdef __tilegx__ | ||
149 | |||
150 | /* | ||
151 | * We reserve the lower half of memory for user-space programs, and the | ||
152 | * upper half for system code. We re-map all of physical memory in the | ||
153 | * upper half, which takes a quarter of our VA space. Then we have | ||
154 | * the vmalloc regions. The supervisor code lives at 0xfffffff700000000, | ||
155 | * with the hypervisor above that. | ||
156 | * | ||
157 | * Loadable kernel modules are placed immediately after the static | ||
158 | * supervisor code, with each being allocated a 256MB region of | ||
159 | * address space, so we don't have to worry about the range of "jal" | ||
160 | * and other branch instructions. | ||
161 | * | ||
162 | * For now we keep life simple and just allocate one pmd (4GB) for vmalloc. | ||
163 | * Similarly, for now we don't play any struct page mapping games. | ||
164 | */ | ||
165 | |||
166 | #if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH() | ||
167 | # error Too much PA to map with the VA available! | ||
168 | #endif | ||
169 | #define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1)) | ||
170 | |||
171 | #define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ | ||
172 | #define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ | ||
173 | #define PAGE_OFFSET MEM_HIGH_START | ||
174 | #define _VMALLOC_START _AC(0xfffffff500000000, UL) /* 4 GB */ | ||
175 | #define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ | ||
176 | #define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ | ||
177 | #define MEM_SV_INTRPT MEM_SV_START | ||
178 | #define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */ | ||
179 | #define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024)) | ||
180 | #define MEM_HV_START _AC(0xfffffff800000000, UL) /* 32 GB */ | ||
181 | |||
182 | /* Highest DTLB address we will use */ | ||
183 | #define KERNEL_HIGH_VADDR MEM_SV_START | ||
184 | |||
185 | /* Since we don't currently provide any fixmaps, we use an impossible VA. */ | ||
186 | #define FIXADDR_TOP MEM_HV_START | ||
187 | |||
188 | #else /* !__tilegx__ */ | ||
189 | |||
190 | /* | ||
191 | * A PAGE_OFFSET of 0xC0000000 means that the kernel has | ||
192 | * a virtual address space of one gigabyte, which limits the | ||
193 | * amount of physical memory you can use to about 768MB. | ||
194 | * If you want more physical memory than this then see the CONFIG_HIGHMEM | ||
195 | * option in the kernel configuration. | ||
196 | * | ||
197 | * The top two 16MB chunks in the table below (VIRT and HV) are | ||
198 | * unavailable to Linux. Since the kernel interrupt vectors must live | ||
199 | * at 0xfd000000, we map all of the bottom of RAM at this address with | ||
200 | * a huge page table entry to minimize its ITLB footprint (as well as | ||
201 | * at PAGE_OFFSET). The last architected requirement is that user | ||
202 | * interrupt vectors live at 0xfc000000, so we make that range of | ||
203 | * memory available to user processes. The remaining regions are sized | ||
204 | * as shown; after the first four addresses, we show "typical" values, | ||
205 | * since the actual addresses depend on kernel #defines. | ||
206 | * | ||
207 | * MEM_VIRT_INTRPT 0xff000000 | ||
208 | * MEM_HV_INTRPT 0xfe000000 | ||
209 | * MEM_SV_INTRPT (kernel code) 0xfd000000 | ||
210 | * MEM_USER_INTRPT (user vector) 0xfc000000 | ||
211 | * FIX_KMAP_xxx 0xf8000000 (via NR_CPUS * KM_TYPE_NR) | ||
212 | * PKMAP_BASE 0xf7000000 (via LAST_PKMAP) | ||
213 | * HUGE_VMAP 0xf3000000 (via CONFIG_NR_HUGE_VMAPS) | ||
214 | * VMALLOC_START 0xf0000000 (via __VMALLOC_RESERVE) | ||
215 | * mapped LOWMEM 0xc0000000 | ||
216 | */ | ||
217 | |||
218 | #define MEM_USER_INTRPT _AC(0xfc000000, UL) | ||
219 | #define MEM_SV_INTRPT _AC(0xfd000000, UL) | ||
220 | #define MEM_HV_INTRPT _AC(0xfe000000, UL) | ||
221 | #define MEM_VIRT_INTRPT _AC(0xff000000, UL) | ||
222 | |||
223 | #define INTRPT_SIZE 0x4000 | ||
224 | |||
225 | /* Tolerate page size larger than the architecture interrupt region size. */ | ||
226 | #if PAGE_SIZE > INTRPT_SIZE | ||
227 | #undef INTRPT_SIZE | ||
228 | #define INTRPT_SIZE PAGE_SIZE | ||
229 | #endif | ||
230 | |||
231 | #define KERNEL_HIGH_VADDR MEM_USER_INTRPT | ||
232 | #define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE) | ||
233 | |||
234 | #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) | ||
235 | |||
236 | /* On 32-bit architectures we mix kernel modules in with other vmaps. */ | ||
237 | #define MEM_MODULE_START VMALLOC_START | ||
238 | #define MEM_MODULE_END VMALLOC_END | ||
239 | |||
240 | #endif /* __tilegx__ */ | ||
241 | |||
242 | #ifndef __ASSEMBLY__ | ||
243 | |||
244 | #ifdef CONFIG_HIGHMEM | ||
245 | |||
246 | /* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */ | ||
247 | extern unsigned long pbase_map[]; | ||
248 | extern void *vbase_map[]; | ||
249 | |||
250 | static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr) | ||
251 | { | ||
252 | unsigned long kaddr = (unsigned long)_kaddr; | ||
253 | return pbase_map[kaddr >> HPAGE_SHIFT] + | ||
254 | ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT); | ||
255 | } | ||
256 | |||
257 | static inline void *pfn_to_kaddr(unsigned long pfn) | ||
258 | { | ||
259 | return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT); | ||
260 | } | ||
261 | |||
262 | static inline phys_addr_t virt_to_phys(const volatile void *kaddr) | ||
263 | { | ||
264 | unsigned long pfn = kaddr_to_pfn(kaddr); | ||
265 | return ((phys_addr_t)pfn << PAGE_SHIFT) + | ||
266 | ((unsigned long)kaddr & (PAGE_SIZE-1)); | ||
267 | } | ||
268 | |||
269 | static inline void *phys_to_virt(phys_addr_t paddr) | ||
270 | { | ||
271 | return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1)); | ||
272 | } | ||
273 | |||
274 | /* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */ | ||
275 | static inline int virt_addr_valid(const volatile void *kaddr) | ||
276 | { | ||
277 | extern void *high_memory; /* copied from <linux/mm.h> */ | ||
278 | return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory); | ||
279 | } | ||
280 | |||
281 | #else /* !CONFIG_HIGHMEM */ | ||
282 | |||
283 | static inline unsigned long kaddr_to_pfn(const volatile void *kaddr) | ||
284 | { | ||
285 | return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT; | ||
286 | } | ||
287 | |||
288 | static inline void *pfn_to_kaddr(unsigned long pfn) | ||
289 | { | ||
290 | return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET); | ||
291 | } | ||
292 | |||
293 | static inline phys_addr_t virt_to_phys(const volatile void *kaddr) | ||
294 | { | ||
295 | return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET); | ||
296 | } | ||
297 | |||
298 | static inline void *phys_to_virt(phys_addr_t paddr) | ||
299 | { | ||
300 | return (void *)((unsigned long)paddr + PAGE_OFFSET); | ||
301 | } | ||
302 | |||
303 | /* Check that the given address is within some mapped range of PAs. */ | ||
304 | #define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr)) | ||
305 | |||
306 | #endif /* !CONFIG_HIGHMEM */ | ||
307 | |||
308 | /* Not all callers are consistent in how they call these functions. */ | ||
309 | #define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr)) | ||
310 | #define __va(paddr) phys_to_virt((phys_addr_t)(paddr)) | ||
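A small usage sketch (not from the source): in the !CONFIG_HIGHMEM case both macros are plain offsets by PAGE_OFFSET, so the round trip is the identity for any lowmem pointer.

/* Illustration only. */
static inline int lowmem_roundtrip_ok(void *kaddr)
{
	return __va(__pa(kaddr)) == kaddr;
}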
311 | |||
312 | extern int devmem_is_allowed(unsigned long pagenr); | ||
313 | |||
314 | #ifdef CONFIG_FLATMEM | ||
315 | static inline int pfn_valid(unsigned long pfn) | ||
316 | { | ||
317 | return pfn < max_mapnr; | ||
318 | } | ||
319 | #endif | ||
320 | |||
321 | /* Provide as macros since these require some other headers included. */ | ||
322 | #define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) | ||
323 | #define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr)) | ||
324 | #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page)) | ||
325 | |||
326 | struct mm_struct; | ||
327 | extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); | ||
328 | |||
329 | #endif /* !__ASSEMBLY__ */ | ||
330 | |||
331 | #define VM_DATA_DEFAULT_FLAGS \ | ||
332 | (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
333 | |||
334 | #include <asm-generic/memory_model.h> | ||
335 | #include <asm-generic/getorder.h> | ||
336 | |||
337 | #endif /* __KERNEL__ */ | ||
338 | |||
339 | #endif /* _ASM_TILE_PAGE_H */ | ||
diff --git a/arch/tile/include/asm/param.h b/arch/tile/include/asm/param.h new file mode 100644 index 00000000000..965d4542797 --- /dev/null +++ b/arch/tile/include/asm/param.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/param.h> | |||
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h new file mode 100644 index 00000000000..e853b0e2793 --- /dev/null +++ b/arch/tile/include/asm/pci-bridge.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_BRIDGE_H | ||
16 | #define _ASM_TILE_PCI_BRIDGE_H | ||
17 | |||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/pci.h> | ||
20 | |||
21 | struct device_node; | ||
22 | struct pci_controller; | ||
23 | |||
24 | /* | ||
25 | * pci_bus_io_base returns the memory address at which you can access | ||
26 | * the I/O space for PCI bus number `bus' (or NULL on error). | ||
27 | */ | ||
28 | extern void __iomem *pci_bus_io_base(unsigned int bus); | ||
29 | extern unsigned long pci_bus_io_base_phys(unsigned int bus); | ||
30 | extern unsigned long pci_bus_mem_base_phys(unsigned int bus); | ||
31 | |||
32 | /* Allocate a new PCI host bridge structure */ | ||
33 | extern struct pci_controller *pcibios_alloc_controller(void); | ||
34 | |||
35 | /* Helper function for setting up resources */ | ||
36 | extern void pci_init_resource(struct resource *res, unsigned long start, | ||
37 | unsigned long end, int flags, char *name); | ||
38 | |||
39 | /* Get the PCI host controller for a bus */ | ||
40 | extern struct pci_controller *pci_bus_to_hose(int bus); | ||
41 | |||
42 | /* | ||
43 | * Structure of a PCI controller (host bridge) | ||
44 | */ | ||
45 | struct pci_controller { | ||
46 | int index; /* PCI domain number */ | ||
47 | struct pci_bus *root_bus; | ||
48 | |||
49 | int first_busno; | ||
50 | int last_busno; | ||
51 | |||
52 | int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ | ||
53 | int hv_mem_fd; /* fd to Hypervisor for MMIO operations */ | ||
54 | |||
55 | struct pci_ops *ops; | ||
56 | |||
57 | int irq_base; /* Base IRQ from the Hypervisor */ | ||
58 | int plx_gen1; /* flag for PLX Gen 1 configuration */ | ||
59 | |||
60 | /* Address ranges that are routed to this controller/bridge. */ | ||
61 | struct resource mem_resources[3]; | ||
62 | }; | ||
63 | |||
64 | static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) | ||
65 | { | ||
66 | return bus->sysdata; | ||
67 | } | ||
68 | |||
69 | extern void setup_indirect_pci_nomap(struct pci_controller *hose, | ||
70 | void __iomem *cfg_addr, void __iomem *cfg_data); | ||
71 | extern void setup_indirect_pci(struct pci_controller *hose, | ||
72 | u32 cfg_addr, u32 cfg_data); | ||
73 | extern void setup_grackle(struct pci_controller *hose); | ||
74 | |||
75 | extern unsigned char common_swizzle(struct pci_dev *, unsigned char *); | ||
76 | |||
77 | /* | ||
78 | * The following code swizzles for exactly one bridge. The routine | ||
80 | * common_swizzle below handles multiple bridges. But there are | ||
80 | * some boards that don't follow the PCI spec's suggestion so we | ||
81 | * break this piece out separately. | ||
82 | */ | ||
83 | static inline unsigned char bridge_swizzle(unsigned char pin, | ||
84 | unsigned char idsel) | ||
85 | { | ||
86 | return (((pin-1) + idsel) % 4) + 1; | ||
87 | } | ||
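A worked example of the swizzle arithmetic (values chosen for illustration):

/*
 * bridge_swizzle(1, 3) == ((0 + 3) % 4) + 1 == 4
 * bridge_swizzle(2, 3) == ((1 + 3) % 4) + 1 == 1
 * i.e. INTA of the device at idsel 3 shows up as INTD upstream of the bridge.
 */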
88 | |||
89 | /* | ||
90 | * The following macro is used to lookup irqs in a standard table | ||
91 | * format for those PPC systems that do not already have PCI | ||
92 | * interrupts properly routed. | ||
93 | */ | ||
94 | /* FIXME - double check this */ | ||
95 | #define PCI_IRQ_TABLE_LOOKUP ({ \ | ||
96 | long _ctl_ = -1; \ | ||
97 | if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \ | ||
98 | _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \ | ||
99 | _ctl_; \ | ||
100 | }) | ||
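A hedged sketch of the context this macro expects: idsel, pin, min_idsel, max_idsel, irqs_per_slot and pci_irq_table must all be in scope at the point of use. The table below is hypothetical and only illustrates the expected shape.

/* Hypothetical board routing table (illustration only). */
static char pci_irq_table[][4] = {
	/* INTA, INTB, INTC, INTD */
	{   16,   17,   18,   19 },	/* idsel 1 */
	{   20,   21,   22,   23 },	/* idsel 2 */
};
/* With min_idsel == 1, max_idsel == 2 and irqs_per_slot == 4 defined locally,
 * PCI_IRQ_TABLE_LOOKUP evaluates to the routed IRQ, or -1 when out of range. */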
101 | |||
102 | /* | ||
103 | * Scan the buses below a given PCI host bridge and assign suitable | ||
104 | * resources to all devices found. | ||
105 | */ | ||
106 | extern int pciauto_bus_scan(struct pci_controller *, int); | ||
107 | |||
108 | #ifdef CONFIG_PCI | ||
109 | extern unsigned long pci_address_to_pio(phys_addr_t address); | ||
110 | #else | ||
111 | static inline unsigned long pci_address_to_pio(phys_addr_t address) | ||
112 | { | ||
113 | return (unsigned long)-1; | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | #endif /* _ASM_TILE_PCI_BRIDGE_H */ | ||
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h new file mode 100644 index 00000000000..b0c15da2d5d --- /dev/null +++ b/arch/tile/include/asm/pci.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_H | ||
16 | #define _ASM_TILE_PCI_H | ||
17 | |||
18 | #include <asm/pci-bridge.h> | ||
19 | |||
20 | /* | ||
21 | * The hypervisor maps the entirety of CPA-space as bus addresses, so | ||
22 | * bus addresses are physical addresses. The networking and block | ||
23 | * device layers use this boolean for bounce buffer decisions. | ||
24 | */ | ||
25 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
26 | |||
27 | struct pci_controller *pci_bus_to_hose(int bus); | ||
28 | unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp); | ||
29 | int __init tile_pci_init(void); | ||
30 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr); | ||
31 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | ||
32 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); | ||
33 | |||
34 | int __devinit _tile_cfg_read(struct pci_controller *hose, | ||
35 | int bus, | ||
36 | int slot, | ||
37 | int function, | ||
38 | int offset, | ||
39 | int size, | ||
40 | u32 *val); | ||
41 | int __devinit _tile_cfg_write(struct pci_controller *hose, | ||
42 | int bus, | ||
43 | int slot, | ||
44 | int function, | ||
45 | int offset, | ||
46 | int size, | ||
47 | u32 val); | ||
48 | |||
49 | /* | ||
50 | * These are used to do config reads and writes in the early stages of | ||
51 | * setup before the driver infrastructure has been set up enough to be | ||
52 | * able to do config reads and writes. | ||
53 | */ | ||
54 | #define early_cfg_read(where, size, value) \ | ||
55 | _tile_cfg_read(controller, \ | ||
56 | current_bus, \ | ||
57 | pci_slot, \ | ||
58 | pci_fn, \ | ||
59 | where, \ | ||
60 | size, \ | ||
61 | value) | ||
62 | |||
63 | #define early_cfg_write(where, size, value) \ | ||
64 | _tile_cfg_write(controller, \ | ||
65 | current_bus, \ | ||
66 | pci_slot, \ | ||
67 | pci_fn, \ | ||
68 | where, \ | ||
69 | size, \ | ||
70 | value) | ||
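A hedged usage sketch: these macros assume that controller, current_bus, pci_slot and pci_fn are already defined in the enclosing function, e.g. (hypothetical):

/* Illustration only: read the 32-bit vendor/device ID word at offset 0. */
static int early_read_ids(struct pci_controller *controller,
			  int current_bus, int pci_slot, int pci_fn, u32 *ids)
{
	return early_cfg_read(0, 4 /* dword */, ids);
}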
71 | |||
72 | |||
73 | |||
74 | #define PCICFG_BYTE 1 | ||
75 | #define PCICFG_WORD 2 | ||
76 | #define PCICFG_DWORD 4 | ||
77 | |||
78 | #define TILE_NUM_PCIE 2 | ||
79 | |||
80 | #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index) | ||
81 | |||
82 | /* | ||
83 | * This decides whether to display the domain number in /proc. | ||
84 | */ | ||
85 | static inline int pci_proc_domain(struct pci_bus *bus) | ||
86 | { | ||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * I/O space is currently not supported. | ||
92 | */ | ||
93 | |||
94 | #define TILE_PCIE_LOWER_IO 0x0 | ||
95 | #define TILE_PCIE_UPPER_IO 0x10000 | ||
96 | #define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF | ||
97 | |||
98 | #define _PAGE_NO_CACHE 0 | ||
99 | #define _PAGE_GUARDED 0 | ||
100 | |||
101 | |||
102 | #define pcibios_assign_all_busses() pci_assign_all_buses | ||
103 | extern int pci_assign_all_buses; | ||
104 | |||
105 | static inline void pcibios_set_master(struct pci_dev *dev) | ||
106 | { | ||
107 | /* No special bus mastering setup handling */ | ||
108 | } | ||
109 | |||
110 | #define PCIBIOS_MIN_MEM 0 | ||
111 | #define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO | ||
112 | |||
113 | /* | ||
114 | * This flag tells whether the platform is TILEmpower, which needs | ||
115 | * special configuration for the PLX switch chip. | ||
116 | */ | ||
117 | extern int blade_pci; | ||
118 | |||
119 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | ||
120 | #include <asm-generic/pci-dma-compat.h> | ||
121 | |||
122 | /* generic pci stuff */ | ||
123 | #include <asm-generic/pci.h> | ||
124 | |||
125 | /* Use any cpu for PCI. */ | ||
126 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
127 | |||
128 | #endif /* _ASM_TILE_PCI_H */ | ||
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h new file mode 100644 index 00000000000..63294f5a8ef --- /dev/null +++ b/arch/tile/include/asm/percpu.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PERCPU_H | ||
16 | #define _ASM_TILE_PERCPU_H | ||
17 | |||
18 | register unsigned long __my_cpu_offset __asm__("tp"); | ||
19 | #define __my_cpu_offset __my_cpu_offset | ||
20 | #define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) | ||
21 | |||
22 | #include <asm-generic/percpu.h> | ||
23 | |||
24 | #endif /* _ASM_TILE_PERCPU_H */ | ||
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h new file mode 100644 index 00000000000..cf52791a550 --- /dev/null +++ b/arch/tile/include/asm/pgalloc.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PGALLOC_H | ||
16 | #define _ASM_TILE_PGALLOC_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/mmzone.h> | ||
21 | #include <asm/fixmap.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* Bits for the size of the second-level page table. */ | ||
25 | #define L2_KERNEL_PGTABLE_SHIFT \ | ||
26 | (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE) | ||
27 | |||
28 | /* We currently allocate user L2 page tables by page (unlike kernel L2s). */ | ||
29 | #if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL | ||
30 | #define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL | ||
31 | #else | ||
32 | #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT | ||
33 | #endif | ||
34 | |||
35 | /* How many pages do we need, as an "order", for a user L2 page table? */ | ||
36 | #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL) | ||
37 | |||
38 | /* How big is a kernel L2 page table? */ | ||
39 | #define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT) | ||
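A worked check of these shifts, using the PAGE_SHIFT/HPAGE_SHIFT values from <asm/page.h> (16 and 24) and assuming 8-byte PTEs, i.e. HV_LOG2_PTE_SIZE == 3 (an assumption here, consistent with the 64-bit pteval_t):

/*
 * L2_KERNEL_PGTABLE_SHIFT == 24 - 16 + 3 == 11  ->  2KB kernel L2 tables.
 * Since 11 < 16, L2_USER_PGTABLE_SHIFT == 16    ->  one 64KB page per user L2,
 * and L2_USER_PGTABLE_ORDER == 16 - 16 == 0.
 * This matches the "2KB vs 64KB" note in <asm/page.h>.
 */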
40 | |||
41 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | ||
42 | { | ||
43 | #ifdef CONFIG_64BIT | ||
44 | set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER); | ||
45 | #else | ||
46 | set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER); | ||
47 | #endif | ||
48 | } | ||
49 | |||
50 | static inline void pmd_populate_kernel(struct mm_struct *mm, | ||
51 | pmd_t *pmd, pte_t *ptep) | ||
52 | { | ||
53 | set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN, | ||
54 | __pgprot(_PAGE_PRESENT))); | ||
55 | } | ||
56 | |||
57 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | ||
58 | pgtable_t page) | ||
59 | { | ||
60 | set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)), | ||
61 | __pgprot(_PAGE_PRESENT))); | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Allocate and free page tables. | ||
66 | */ | ||
67 | |||
68 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | ||
69 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | ||
70 | |||
71 | extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); | ||
72 | extern void pte_free(struct mm_struct *mm, struct page *pte); | ||
73 | |||
74 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
75 | |||
76 | static inline pte_t * | ||
77 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
78 | { | ||
79 | return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address))); | ||
80 | } | ||
81 | |||
82 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
83 | { | ||
84 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | ||
85 | pte_free(mm, virt_to_page(pte)); | ||
86 | } | ||
87 | |||
88 | extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, | ||
89 | unsigned long address); | ||
90 | |||
91 | #define check_pgt_cache() do { } while (0) | ||
92 | |||
93 | /* | ||
94 | * Get the small-page pte_t lowmem entry for a given pfn. | ||
95 | * This may or may not be in use, depending on whether the initial | ||
96 | * huge-page entry for the page has already been shattered. | ||
97 | */ | ||
98 | pte_t *get_prealloc_pte(unsigned long pfn); | ||
99 | |||
100 | /* During init, we can shatter kernel huge pages if needed. */ | ||
101 | void shatter_pmd(pmd_t *pmd); | ||
102 | |||
103 | #ifdef __tilegx__ | ||
104 | /* We share a single page allocator for both L1 and L2 page tables. */ | ||
105 | #if HV_L1_SIZE != HV_L2_SIZE | ||
106 | # error Rework assumption that L1 and L2 page tables are same size. | ||
107 | #endif | ||
108 | #define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER | ||
109 | #define pud_populate(mm, pud, pmd) \ | ||
110 | pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd)) | ||
111 | #define pmd_alloc_one(mm, addr) \ | ||
112 | ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr)))) | ||
113 | #define pmd_free(mm, pmdp) \ | ||
114 | pte_free((mm), virt_to_page(pmdp)) | ||
115 | #define __pmd_free_tlb(tlb, pmdp, address) \ | ||
116 | __pte_free_tlb((tlb), virt_to_page(pmdp), (address)) | ||
117 | #endif | ||
118 | |||
119 | #endif /* _ASM_TILE_PGALLOC_H */ | ||
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h new file mode 100644 index 00000000000..b3367379d53 --- /dev/null +++ b/arch/tile/include/asm/pgtable.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file contains the functions and defines necessary to modify and use | ||
15 | * the TILE page table tree. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_PGTABLE_H | ||
19 | #define _ASM_TILE_PGTABLE_H | ||
20 | |||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | |||
25 | #include <linux/bitops.h> | ||
26 | #include <linux/threads.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/fixmap.h> | ||
32 | #include <asm/system.h> | ||
33 | |||
34 | struct mm_struct; | ||
35 | struct vm_area_struct; | ||
36 | |||
37 | /* | ||
38 | * ZERO_PAGE is a global shared page that is always zero: used | ||
39 | * for zero-mapped memory areas etc.. | ||
40 | */ | ||
41 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | ||
42 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
43 | |||
44 | extern pgd_t swapper_pg_dir[]; | ||
45 | extern pgprot_t swapper_pgprot; | ||
46 | extern struct kmem_cache *pgd_cache; | ||
47 | extern spinlock_t pgd_lock; | ||
48 | extern struct list_head pgd_list; | ||
49 | |||
50 | /* | ||
51 | * The very last slots in the pgd_t are for addresses unusable by Linux | ||
52 | * (pgd_addr_invalid() returns true). So we use them for the list structure. | ||
53 | * The x86 code we are modelled on uses the page->private/index fields | ||
54 | * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since | ||
55 | * our pgds are so much smaller than a page, it seems a waste to | ||
56 | * spend a whole page on each pgd. | ||
57 | */ | ||
58 | #define PGD_LIST_OFFSET \ | ||
59 | ((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head)) | ||
60 | #define pgd_to_list(pgd) \ | ||
61 | ((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET)) | ||
62 | #define list_to_pgd(list) \ | ||
63 | ((pgd_t *)((char *)(list) - PGD_LIST_OFFSET)) | ||
64 | |||
65 | extern void pgtable_cache_init(void); | ||
66 | extern void paging_init(void); | ||
67 | extern void set_page_homes(void); | ||
68 | |||
69 | #define FIRST_USER_ADDRESS 0 | ||
70 | |||
71 | #define _PAGE_PRESENT HV_PTE_PRESENT | ||
72 | #define _PAGE_HUGE_PAGE HV_PTE_PAGE | ||
73 | #define _PAGE_READABLE HV_PTE_READABLE | ||
74 | #define _PAGE_WRITABLE HV_PTE_WRITABLE | ||
75 | #define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE | ||
76 | #define _PAGE_ACCESSED HV_PTE_ACCESSED | ||
77 | #define _PAGE_DIRTY HV_PTE_DIRTY | ||
78 | #define _PAGE_GLOBAL HV_PTE_GLOBAL | ||
79 | #define _PAGE_USER HV_PTE_USER | ||
80 | |||
81 | /* | ||
82 | * All the "standard" bits. Cache-control bits are managed elsewhere. | ||
83 | * This is used to test for valid level-2 page table pointers by checking | ||
84 | * all the bits, and to mask away the cache control bits for mprotect. | ||
85 | */ | ||
86 | #define _PAGE_ALL (\ | ||
87 | _PAGE_PRESENT | \ | ||
88 | _PAGE_HUGE_PAGE | \ | ||
89 | _PAGE_READABLE | \ | ||
90 | _PAGE_WRITABLE | \ | ||
91 | _PAGE_EXECUTABLE | \ | ||
92 | _PAGE_ACCESSED | \ | ||
93 | _PAGE_DIRTY | \ | ||
94 | _PAGE_GLOBAL | \ | ||
95 | _PAGE_USER \ | ||
96 | ) | ||
97 | |||
98 | #define PAGE_NONE \ | ||
99 | __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) | ||
100 | #define PAGE_SHARED \ | ||
101 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
102 | _PAGE_USER | _PAGE_ACCESSED) | ||
103 | |||
104 | #define PAGE_SHARED_EXEC \ | ||
105 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
106 | _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED) | ||
107 | #define PAGE_COPY_NOEXEC \ | ||
108 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
109 | #define PAGE_COPY_EXEC \ | ||
110 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
111 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
112 | #define PAGE_COPY \ | ||
113 | PAGE_COPY_NOEXEC | ||
114 | #define PAGE_READONLY \ | ||
115 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
116 | #define PAGE_READONLY_EXEC \ | ||
117 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
118 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
119 | |||
120 | #define _PAGE_KERNEL_RO \ | ||
121 | (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED) | ||
122 | #define _PAGE_KERNEL \ | ||
123 | (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY) | ||
124 | #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE) | ||
125 | |||
126 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) | ||
127 | #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) | ||
128 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) | ||
129 | |||
130 | #define page_to_kpgprot(p) PAGE_KERNEL | ||
131 | |||
132 | /* | ||
133 | * We could tighten these up, but for now writable or executable | ||
134 | * implies readable. | ||
135 | */ | ||
136 | #define __P000 PAGE_NONE | ||
137 | #define __P001 PAGE_READONLY | ||
138 | #define __P010 PAGE_COPY /* this is write-only, which we won't support */ | ||
139 | #define __P011 PAGE_COPY | ||
140 | #define __P100 PAGE_READONLY_EXEC | ||
141 | #define __P101 PAGE_READONLY_EXEC | ||
142 | #define __P110 PAGE_COPY_EXEC | ||
143 | #define __P111 PAGE_COPY_EXEC | ||
144 | |||
145 | #define __S000 PAGE_NONE | ||
146 | #define __S001 PAGE_READONLY | ||
147 | #define __S010 PAGE_SHARED | ||
148 | #define __S011 PAGE_SHARED | ||
149 | #define __S100 PAGE_READONLY_EXEC | ||
150 | #define __S101 PAGE_READONLY_EXEC | ||
151 | #define __S110 PAGE_SHARED_EXEC | ||
152 | #define __S111 PAGE_SHARED_EXEC | ||
153 | |||
154 | /* | ||
155 | * All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT | ||
156 | * and PAGE_HUGE_PAGE, which must be one and zero, respectively. | ||
157 | * We set the ignored bits to zero. | ||
158 | */ | ||
159 | #define _PAGE_TABLE _PAGE_PRESENT | ||
160 | |||
161 | /* Inherit the caching flags from the old protection bits. */ | ||
162 | #define pgprot_modify(oldprot, newprot) \ | ||
163 | (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val } | ||
164 | |||
165 | /* Just setting the PFN to zero suffices. */ | ||
166 | #define pte_pgprot(x) hv_pte_set_pfn((x), 0) | ||
167 | |||
168 | /* | ||
169 | * For PTEs and PDEs, we must clear the Present bit first when | ||
170 | * clearing a page table entry, so clear the bottom half first and | ||
171 | * enforce ordering with a barrier. | ||
172 | */ | ||
173 | static inline void __pte_clear(pte_t *ptep) | ||
174 | { | ||
175 | #ifdef __tilegx__ | ||
176 | ptep->val = 0; | ||
177 | #else | ||
178 | u32 *tmp = (u32 *)ptep; | ||
179 | tmp[0] = 0; | ||
180 | barrier(); | ||
181 | tmp[1] = 0; | ||
182 | #endif | ||
183 | } | ||
184 | #define pte_clear(mm, addr, ptep) __pte_clear(ptep) | ||
185 | |||
186 | /* | ||
187 | * The following only work if pte_present() is true. | ||
188 | * Undefined behaviour if not. | ||
189 | */ | ||
190 | #define pte_present hv_pte_get_present | ||
191 | #define pte_user hv_pte_get_user | ||
192 | #define pte_read hv_pte_get_readable | ||
193 | #define pte_dirty hv_pte_get_dirty | ||
194 | #define pte_young hv_pte_get_accessed | ||
195 | #define pte_write hv_pte_get_writable | ||
196 | #define pte_exec hv_pte_get_executable | ||
197 | #define pte_huge hv_pte_get_page | ||
198 | #define pte_rdprotect hv_pte_clear_readable | ||
199 | #define pte_exprotect hv_pte_clear_executable | ||
200 | #define pte_mkclean hv_pte_clear_dirty | ||
201 | #define pte_mkold hv_pte_clear_accessed | ||
202 | #define pte_wrprotect hv_pte_clear_writable | ||
203 | #define pte_mksmall hv_pte_clear_page | ||
204 | #define pte_mkread hv_pte_set_readable | ||
205 | #define pte_mkexec hv_pte_set_executable | ||
206 | #define pte_mkdirty hv_pte_set_dirty | ||
207 | #define pte_mkyoung hv_pte_set_accessed | ||
208 | #define pte_mkwrite hv_pte_set_writable | ||
209 | #define pte_mkhuge hv_pte_set_page | ||
210 | |||
211 | #define pte_special(pte) 0 | ||
212 | #define pte_mkspecial(pte) (pte) | ||
213 | |||
214 | /* | ||
215 | * Use some spare bits in the PTE for user-caching tags. | ||
216 | */ | ||
217 | #define pte_set_forcecache hv_pte_set_client0 | ||
218 | #define pte_get_forcecache hv_pte_get_client0 | ||
219 | #define pte_clear_forcecache hv_pte_clear_client0 | ||
220 | #define pte_set_anyhome hv_pte_set_client1 | ||
221 | #define pte_get_anyhome hv_pte_get_client1 | ||
222 | #define pte_clear_anyhome hv_pte_clear_client1 | ||
223 | |||
224 | /* | ||
225 | * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved. | ||
226 | */ | ||
227 | #define pte_migrating hv_pte_get_migrating | ||
228 | #define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x)) | ||
229 | #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) | ||
230 | |||
231 | #define pte_ERROR(e) \ | ||
232 | pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e)) | ||
233 | #define pgd_ERROR(e) \ | ||
234 | pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
235 | |||
236 | /* | ||
237 | * set_pte_order() sets the given PTE and also sanity-checks the | ||
238 | * requested PTE against the page homecaching. Unspecified parts | ||
239 | * of the PTE are filled in when it is written to memory, i.e. all | ||
240 | * caching attributes if "!forcecache", or the home cpu if "anyhome". | ||
241 | */ | ||
242 | extern void set_pte_order(pte_t *ptep, pte_t pte, int order); | ||
243 | |||
244 | #define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0) | ||
245 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | ||
246 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval) | ||
247 | |||
248 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
249 | |||
250 | static inline int pte_none(pte_t pte) | ||
251 | { | ||
252 | return !pte.val; | ||
253 | } | ||
254 | |||
255 | static inline unsigned long pte_pfn(pte_t pte) | ||
256 | { | ||
257 | return hv_pte_get_pfn(pte); | ||
258 | } | ||
259 | |||
260 | /* Set or get the remote cache cpu in a pgprot with remote caching. */ | ||
261 | extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu); | ||
262 | extern int get_remote_cache_cpu(pgprot_t prot); | ||
263 | |||
264 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) | ||
265 | { | ||
266 | return hv_pte_set_pfn(prot, pfn); | ||
267 | } | ||
268 | |||
269 | /* Support for priority mappings. */ | ||
270 | extern void start_mm_caching(struct mm_struct *mm); | ||
271 | extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next); | ||
272 | |||
273 | /* | ||
274 | * Support non-linear file mappings (see sys_remap_file_pages). | ||
275 | * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the | ||
276 | * file offset in the 32 high bits. | ||
277 | */ | ||
278 | #define _PAGE_FILE HV_PTE_CLIENT1 | ||
279 | #define PTE_FILE_MAX_BITS 32 | ||
280 | #define pte_file(pte) (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte)) | ||
281 | #define pte_to_pgoff(pte) ((pte).val >> 32) | ||
282 | #define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE }) | ||
283 | |||
284 | /* | ||
285 | * Encode and de-code a swap entry (see <linux/swapops.h>). | ||
286 | * We put the swap file type+offset in the 32 high bits; | ||
287 | * I believe we can just leave the low bits clear. | ||
288 | */ | ||
289 | #define __swp_type(swp) ((swp).val & 0x1f) | ||
290 | #define __swp_offset(swp) ((swp).val >> 5) | ||
291 | #define __swp_entry(type, off) ((swp_entry_t) { (type) | ((off) << 5) }) | ||
292 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 }) | ||
293 | #define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) }) | ||
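A worked packing example (values chosen for illustration):

/*
 * __swp_entry(2, 0x1234) has val == (0x1234 << 5) | 2 == 0x24682;
 * __swp_type() recovers 2 and __swp_offset() recovers 0x1234.
 * __swp_entry_to_pte() then shifts the whole value into the high 32 bits
 * of the pte, leaving the low bits (including _PAGE_PRESENT) clear.
 */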
294 | |||
295 | /* | ||
296 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | ||
297 | * | ||
298 | * dst - pointer to pgd range anywhere on a pgd page | ||
299 | * src - "" | ||
300 | * count - the number of pgds to copy. | ||
301 | * | ||
302 | * dst and src can be on the same page, but the range must not overlap, | ||
303 | * and must not cross a page boundary. | ||
304 | */ | ||
305 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | ||
306 | { | ||
307 | memcpy(dst, src, count * sizeof(pgd_t)); | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Conversion functions: convert a page and protection to a page entry, | ||
312 | * and a page entry and page directory to the page they refer to. | ||
313 | */ | ||
314 | |||
315 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
316 | |||
317 | /* | ||
318 | * If we are doing an mprotect(), just accept the new vma->vm_page_prot | ||
319 | * value and combine it with the PFN from the old PTE to get a new PTE. | ||
320 | */ | ||
321 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
322 | { | ||
323 | return pfn_pte(hv_pte_get_pfn(pte), newprot); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD] | ||
328 | * | ||
329 | * This macro returns the index of the entry in the pgd page which would | ||
330 | * control the given virtual address. | ||
331 | */ | ||
332 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | ||
333 | |||
334 | /* | ||
335 | * pgd_offset() returns a (pgd_t *) | ||
336 | * pgd_index() is used to get the offset into the pgd page's array of pgd_t's. | ||
337 | */ | ||
338 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
339 | |||
340 | /* | ||
341 | * A shortcut which implies the use of the kernel's pgd, instead | ||
342 | * of a process's. | ||
343 | */ | ||
344 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
345 | |||
346 | #if defined(CONFIG_HIGHPTE) | ||
347 | extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type); | ||
348 | #define pte_offset_map(dir, address) \ | ||
349 | _pte_offset_map(dir, address, KM_PTE0) | ||
350 | #define pte_offset_map_nested(dir, address) \ | ||
351 | _pte_offset_map(dir, address, KM_PTE1) | ||
352 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | ||
353 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | ||
354 | #else | ||
355 | #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) | ||
356 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | ||
357 | #define pte_unmap(pte) do { } while (0) | ||
358 | #define pte_unmap_nested(pte) do { } while (0) | ||
359 | #endif | ||
360 | |||
361 | /* Clear a non-executable kernel PTE and flush it from the TLB. */ | ||
362 | #define kpte_clear_flush(ptep, vaddr) \ | ||
363 | do { \ | ||
364 | pte_clear(&init_mm, (vaddr), (ptep)); \ | ||
365 | local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \ | ||
366 | } while (0) | ||
367 | |||
368 | /* | ||
369 | * The kernel page tables contain what we need, and we flush when we | ||
370 | * change specific page table entries. | ||
371 | */ | ||
372 | #define update_mmu_cache(vma, address, pte) do { } while (0) | ||
373 | |||
374 | #ifdef CONFIG_FLATMEM | ||
375 | #define kern_addr_valid(addr) (1) | ||
376 | #endif /* CONFIG_FLATMEM */ | ||
377 | |||
378 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
379 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
380 | |||
381 | extern void vmalloc_sync_all(void); | ||
382 | |||
383 | #endif /* !__ASSEMBLY__ */ | ||
384 | |||
385 | #ifdef __tilegx__ | ||
386 | #include <asm/pgtable_64.h> | ||
387 | #else | ||
388 | #include <asm/pgtable_32.h> | ||
389 | #endif | ||
390 | |||
391 | #ifndef __ASSEMBLY__ | ||
392 | |||
393 | static inline int pmd_none(pmd_t pmd) | ||
394 | { | ||
395 | /* | ||
396 | * Only check the low word on 32-bit platforms, since the upper | ||
397 | * half might be out of sync with it. | ||
398 | */ | ||
399 | return (unsigned long)pmd_val(pmd) == 0; | ||
400 | } | ||
401 | |||
402 | static inline int pmd_present(pmd_t pmd) | ||
403 | { | ||
404 | return pmd_val(pmd) & _PAGE_PRESENT; | ||
405 | } | ||
406 | |||
407 | static inline int pmd_bad(pmd_t pmd) | ||
408 | { | ||
409 | return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE); | ||
410 | } | ||
411 | |||
412 | static inline unsigned long pages_to_mb(unsigned long npg) | ||
413 | { | ||
414 | return npg >> (20 - PAGE_SHIFT); | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD] | ||
419 | * | ||
420 | * This function returns the index of the entry in the pmd which would | ||
421 | * control the given virtual address. | ||
422 | */ | ||
423 | static inline unsigned long pmd_index(unsigned long address) | ||
424 | { | ||
425 | return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * A given kernel pmd_t maps to a specific virtual address (either a | ||
430 | * kernel huge page or a kernel pte_t table). Since kernel pte_t | ||
431 | * tables can be aligned at sub-page granularity, this function can | ||
432 | * return non-page-aligned pointers, despite its name. | ||
433 | */ | ||
434 | static inline unsigned long pmd_page_vaddr(pmd_t pmd) | ||
435 | { | ||
436 | phys_addr_t pa = | ||
437 | (phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN; | ||
438 | return (unsigned long)__va(pa); | ||
439 | } | ||
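A rough sketch of the ptfn-to-address conversion above, assuming a hypothetical HV_LOG2_PAGE_TABLE_ALIGN of 11 (matching the 2KB L2 page tables mentioned in pgtable_32.h); pmd_page_vaddr() then maps the physical address through __va():

    #include <assert.h>
    #include <stdint.h>

    #define LOG2_PT_ALIGN 11                 /* assumed 2KB page-table alignment      */

    int main(void)
    {
        uint32_t ptfn = 0x12345;             /* hypothetical page-table frame number  */
        uint32_t pa = ptfn << LOG2_PT_ALIGN; /* physical address of the pte_t array   */

        assert(pa == 0x091A2800UL);          /* note: not necessarily page-aligned    */
        return 0;
    }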
440 | |||
441 | /* | ||
442 | * A pmd_t points to the base of a huge page or to a pte_t array. | ||
443 | * If a pte_t array, since we can have multiple per page, we don't | ||
444 | * have a one-to-one mapping of pmd_t's to pages. However, this is | ||
445 | * OK for pte_lockptr(), since we just end up with potentially one | ||
446 | * lock being used for several pte_t arrays. | ||
447 | */ | ||
448 | #define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd))) | ||
449 | |||
450 | /* | ||
451 | * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE] | ||
452 | * | ||
453 | * This macro returns the index of the entry in the pte page which would | ||
454 | * control the given virtual address. | ||
455 | */ | ||
456 | static inline unsigned long pte_index(unsigned long address) | ||
457 | { | ||
458 | return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); | ||
459 | } | ||
460 | |||
461 | static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) | ||
462 | { | ||
463 | return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); | ||
464 | } | ||
465 | |||
466 | static inline int pmd_huge_page(pmd_t pmd) | ||
467 | { | ||
468 | return pmd_val(pmd) & _PAGE_HUGE_PAGE; | ||
469 | } | ||
470 | |||
471 | #include <asm-generic/pgtable.h> | ||
472 | |||
473 | /* Support /proc/NN/pgtable API. */ | ||
474 | struct seq_file; | ||
475 | int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm, | ||
476 | unsigned long vaddr, pte_t *ptep, void **datap); | ||
477 | |||
478 | #endif /* !__ASSEMBLY__ */ | ||
479 | |||
480 | #endif /* _ASM_TILE_PGTABLE_H */ | ||
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h new file mode 100644 index 00000000000..53ec3488474 --- /dev/null +++ b/arch/tile/include/asm/pgtable_32.h | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_PGTABLE_32_H | ||
17 | #define _ASM_TILE_PGTABLE_32_H | ||
18 | |||
19 | /* | ||
20 | * The level-1 index is defined by the huge page size. A PGD is composed | ||
21 | * of PTRS_PER_PGD pgd_t's and is the top level of the page table. | ||
22 | */ | ||
23 | #define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE | ||
24 | #define PGDIR_SIZE HV_PAGE_SIZE_LARGE | ||
25 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
26 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | ||
27 | |||
28 | /* | ||
29 | * The level-2 index is defined by the difference between the huge | ||
30 | * page size and the normal page size. A PTE is composed of | ||
31 | * PTRS_PER_PTE pte_t's and is the bottom level of the page table. | ||
32 | * Note that the hypervisor docs use PTE for what we call pte_t, so | ||
33 | * this nomenclature is somewhat confusing. | ||
34 | */ | ||
35 | #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) | ||
36 | |||
37 | #ifndef __ASSEMBLY__ | ||
38 | |||
39 | /* | ||
40 | * Right now we initialize only a single pte table. It can be extended | ||
41 | * easily; subsequent pte tables have to be allocated in one physical | ||
42 | * chunk of RAM. | ||
43 | * | ||
44 | * HOWEVER, if we are using an allocation scheme with slop after the | ||
45 | * end of the page table (e.g. where our L2 page tables are 2KB but | ||
46 | * our pages are 64KB and we are allocating via the page allocator) | ||
47 | * we can't extend it easily. | ||
48 | */ | ||
49 | #define LAST_PKMAP PTRS_PER_PTE | ||
50 | |||
51 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK) | ||
52 | |||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | # define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1)) | ||
55 | #else | ||
56 | # define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1)) | ||
57 | #endif | ||
58 | |||
59 | #ifdef CONFIG_HUGEVMAP | ||
60 | #define HUGE_VMAP_END __VMAPPING_END | ||
61 | #define HUGE_VMAP_BASE (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE) | ||
62 | #define _VMALLOC_END HUGE_VMAP_BASE | ||
63 | #else | ||
64 | #define _VMALLOC_END __VMAPPING_END | ||
65 | #endif | ||
66 | |||
67 | /* | ||
68 | * Align the vmalloc area to an L2 page table, and leave a guard page | ||
69 | * at the beginning and end. The vmalloc code also puts in an internal | ||
70 | * guard page between each allocation. | ||
71 | */ | ||
72 | #define VMALLOC_END (_VMALLOC_END - PAGE_SIZE) | ||
73 | extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; | ||
74 | #define _VMALLOC_START (_VMALLOC_END - VMALLOC_RESERVE) | ||
75 | #define VMALLOC_START (_VMALLOC_START + PAGE_SIZE) | ||
76 | |||
77 | /* This is the maximum possible amount of lowmem. */ | ||
78 | #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) | ||
79 | |||
80 | /* We have no pmd or pud since we are strictly a two-level page table */ | ||
81 | #include <asm-generic/pgtable-nopmd.h> | ||
82 | |||
83 | /* We don't define any pgds for these addresses. */ | ||
84 | static inline int pgd_addr_invalid(unsigned long addr) | ||
85 | { | ||
86 | return addr >= MEM_HV_INTRPT; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Provide versions of these routines that can be used safely when | ||
91 | * the hypervisor may be asynchronously modifying dirty/accessed bits. | ||
92 | * ptep_get_and_clear() matches the generic one but we provide it to | ||
93 | * be parallel with the 64-bit code. | ||
94 | */ | ||
95 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
96 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
97 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
98 | |||
99 | extern int ptep_test_and_clear_young(struct vm_area_struct *, | ||
100 | unsigned long addr, pte_t *); | ||
101 | extern void ptep_set_wrprotect(struct mm_struct *, | ||
102 | unsigned long addr, pte_t *); | ||
103 | |||
104 | |||
105 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | ||
106 | unsigned long addr, pte_t *ptep) | ||
107 | { | ||
108 | pte_t pte = *ptep; | ||
109 | pte_clear(mm, addr, ptep); | ||
110 | return pte; | ||
111 | } | ||
112 | |||
113 | /* Create a pmd from a PTFN. */ | ||
114 | static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) | ||
115 | { | ||
116 | return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } }; | ||
117 | } | ||
118 | |||
119 | /* Return the page-table frame number (ptfn) that a pmd_t points at. */ | ||
120 | #define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd) | ||
121 | |||
122 | static inline void pmd_clear(pmd_t *pmdp) | ||
123 | { | ||
124 | __pte_clear(&pmdp->pud.pgd); | ||
125 | } | ||
126 | |||
127 | #endif /* __ASSEMBLY__ */ | ||
128 | |||
129 | #endif /* _ASM_TILE_PGTABLE_32_H */ | ||
diff --git a/arch/tile/include/asm/poll.h b/arch/tile/include/asm/poll.h new file mode 100644 index 00000000000..c98509d3149 --- /dev/null +++ b/arch/tile/include/asm/poll.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/poll.h> | |||
diff --git a/arch/tile/include/asm/posix_types.h b/arch/tile/include/asm/posix_types.h new file mode 100644 index 00000000000..22cae6230ce --- /dev/null +++ b/arch/tile/include/asm/posix_types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/posix_types.h> | |||
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h new file mode 100644 index 00000000000..d942d09b252 --- /dev/null +++ b/arch/tile/include/asm/processor.h | |||
@@ -0,0 +1,338 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PROCESSOR_H | ||
16 | #define _ASM_TILE_PROCESSOR_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | /* | ||
21 | * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one | ||
22 | * normally would, due to #include dependencies. | ||
23 | */ | ||
24 | #include <linux/types.h> | ||
25 | #include <asm/ptrace.h> | ||
26 | #include <asm/percpu.h> | ||
27 | |||
28 | #include <arch/chip.h> | ||
29 | #include <arch/spr_def.h> | ||
30 | |||
31 | struct task_struct; | ||
32 | struct thread_struct; | ||
33 | |||
34 | typedef struct { | ||
35 | unsigned long seg; | ||
36 | } mm_segment_t; | ||
37 | |||
38 | /* | ||
39 | * Returns the current instruction pointer | ||
40 | * ("program counter"). | ||
41 | */ | ||
42 | void *current_text_addr(void); | ||
43 | |||
44 | #if CHIP_HAS_TILE_DMA() | ||
45 | /* Capture the state of a suspended DMA. */ | ||
46 | struct tile_dma_state { | ||
47 | int enabled; | ||
48 | unsigned long src; | ||
49 | unsigned long dest; | ||
50 | unsigned long strides; | ||
51 | unsigned long chunk_size; | ||
52 | unsigned long src_chunk; | ||
53 | unsigned long dest_chunk; | ||
54 | unsigned long byte; | ||
55 | unsigned long status; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * A mask of the DMA status register for selecting only the 'running' | ||
60 | * and 'done' bits. | ||
61 | */ | ||
62 | #define DMA_STATUS_MASK \ | ||
63 | (SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK) | ||
64 | #endif | ||
65 | |||
66 | /* | ||
67 | * Track asynchronous TLB events (faults and access violations) | ||
68 | * that occur while we are in kernel mode from DMA or the SN processor. | ||
69 | */ | ||
70 | struct async_tlb { | ||
71 | short fault_num; /* original fault number; 0 if none */ | ||
72 | char is_fault; /* was it a fault (vs an access violation) */ | ||
73 | char is_write; /* for fault: was it caused by a write? */ | ||
74 | unsigned long address; /* what address faulted? */ | ||
75 | }; | ||
76 | |||
77 | #ifdef CONFIG_HARDWALL | ||
78 | struct hardwall_info; | ||
79 | #endif | ||
80 | |||
81 | struct thread_struct { | ||
82 | /* kernel stack pointer */ | ||
83 | unsigned long ksp; | ||
84 | /* kernel PC */ | ||
85 | unsigned long pc; | ||
86 | /* starting user stack pointer (for page migration) */ | ||
87 | unsigned long usp0; | ||
88 | /* pid of process that created this one */ | ||
89 | pid_t creator_pid; | ||
90 | #if CHIP_HAS_TILE_DMA() | ||
91 | /* DMA info for suspended threads (byte == 0 means no DMA state) */ | ||
92 | struct tile_dma_state tile_dma_state; | ||
93 | #endif | ||
94 | /* User EX_CONTEXT registers */ | ||
95 | unsigned long ex_context[2]; | ||
96 | /* User SYSTEM_SAVE registers */ | ||
97 | unsigned long system_save[4]; | ||
98 | /* User interrupt mask */ | ||
99 | unsigned long long interrupt_mask; | ||
100 | /* User interrupt-control 0 state */ | ||
101 | unsigned long intctrl_0; | ||
102 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
103 | /* Any other miscellaneous processor state bits */ | ||
104 | unsigned long proc_status; | ||
105 | #endif | ||
106 | #ifdef CONFIG_HARDWALL | ||
107 | /* Is this task tied to an activated hardwall? */ | ||
108 | struct hardwall_info *hardwall; | ||
109 | /* Chains this task into the list at hardwall->list. */ | ||
110 | struct list_head hardwall_list; | ||
111 | #endif | ||
112 | #if CHIP_HAS_TILE_DMA() | ||
113 | /* Async DMA TLB fault information */ | ||
114 | struct async_tlb dma_async_tlb; | ||
115 | #endif | ||
116 | #if CHIP_HAS_SN_PROC() | ||
117 | /* Was the static network processor running when we were switched out? */ | ||
118 | int sn_proc_running; | ||
119 | /* Async SNI TLB fault information */ | ||
120 | struct async_tlb sn_async_tlb; | ||
121 | #endif | ||
122 | }; | ||
123 | |||
124 | #endif /* !__ASSEMBLY__ */ | ||
125 | |||
126 | /* | ||
127 | * Start with "sp" this many bytes below the top of the kernel stack. | ||
128 | * This preserves the invariant that a called function may write to *sp. | ||
129 | */ | ||
130 | #define STACK_TOP_DELTA 8 | ||
131 | |||
132 | /* | ||
133 | * When entering the kernel via a fault, start with the top of the | ||
134 | * pt_regs structure this many bytes below the top of the page. | ||
135 | * This aligns the pt_regs structure optimally for cache-line access. | ||
136 | */ | ||
137 | #ifdef __tilegx__ | ||
138 | #define KSTK_PTREGS_GAP 48 | ||
139 | #else | ||
140 | #define KSTK_PTREGS_GAP 56 | ||
141 | #endif | ||
142 | |||
143 | #ifndef __ASSEMBLY__ | ||
144 | |||
145 | #ifdef __tilegx__ | ||
146 | #define TASK_SIZE_MAX (MEM_LOW_END + 1) | ||
147 | #else | ||
148 | #define TASK_SIZE_MAX PAGE_OFFSET | ||
149 | #endif | ||
150 | |||
151 | /* TASK_SIZE and related variables are always checked in "current" context. */ | ||
152 | #ifdef CONFIG_COMPAT | ||
153 | #define COMPAT_TASK_SIZE (1UL << 31) | ||
154 | #define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ?\ | ||
155 | COMPAT_TASK_SIZE : TASK_SIZE_MAX) | ||
156 | #else | ||
157 | #define TASK_SIZE TASK_SIZE_MAX | ||
158 | #endif | ||
159 | |||
160 | /* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */ | ||
161 | #define VDSO_BASE (TASK_SIZE - PAGE_SIZE) | ||
162 | |||
163 | #define STACK_TOP VDSO_BASE | ||
164 | |||
165 | /* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */ | ||
166 | #define STACK_TOP_MAX TASK_SIZE_MAX | ||
167 | |||
168 | /* | ||
169 | * This decides where the kernel will search for a free chunk of vm | ||
170 | * space during mmap's, if it is using bottom-up mapping. | ||
171 | */ | ||
172 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
173 | |||
174 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
175 | |||
176 | #define INIT_THREAD { \ | ||
177 | .ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \ | ||
178 | .interrupt_mask = -1ULL \ | ||
179 | } | ||
180 | |||
181 | /* Kernel stack top for the task that first boots on this cpu. */ | ||
182 | DECLARE_PER_CPU(unsigned long, boot_sp); | ||
183 | |||
184 | /* PC to boot from on this cpu. */ | ||
185 | DECLARE_PER_CPU(unsigned long, boot_pc); | ||
186 | |||
187 | /* Do necessary setup to start up a newly executed thread. */ | ||
188 | static inline void start_thread(struct pt_regs *regs, | ||
189 | unsigned long pc, unsigned long usp) | ||
190 | { | ||
191 | regs->pc = pc; | ||
192 | regs->sp = usp; | ||
193 | } | ||
194 | |||
195 | /* Free all resources held by a thread. */ | ||
196 | static inline void release_thread(struct task_struct *dead_task) | ||
197 | { | ||
198 | /* Nothing for now */ | ||
199 | } | ||
200 | |||
201 | /* Prepare to copy thread state - unlazy all lazy status. */ | ||
202 | #define prepare_to_copy(tsk) do { } while (0) | ||
203 | |||
204 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
205 | |||
206 | |||
207 | /* | ||
208 | * Return saved (kernel) PC of a blocked thread. | ||
209 | * Only used in a printk() in kernel/sched.c, so don't work too hard. | ||
210 | */ | ||
211 | #define thread_saved_pc(t) ((t)->thread.pc) | ||
212 | |||
213 | unsigned long get_wchan(struct task_struct *p); | ||
214 | |||
215 | /* Return initial ksp value for given task. */ | ||
216 | #define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE) | ||
217 | |||
218 | /* Return some info about the user process TASK. */ | ||
219 | #define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA) | ||
220 | #define task_pt_regs(task) \ | ||
221 | ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1) | ||
222 | #define task_sp(task) (task_pt_regs(task)->sp) | ||
223 | #define task_pc(task) (task_pt_regs(task)->pc) | ||
224 | /* Aliases for pc and sp (used in fs/proc/array.c) */ | ||
225 | #define KSTK_EIP(task) task_pc(task) | ||
226 | #define KSTK_ESP(task) task_sp(task) | ||
227 | |||
228 | /* Standard format for printing registers and other word-size data. */ | ||
229 | #ifdef __tilegx__ | ||
230 | # define REGFMT "0x%016lx" | ||
231 | #else | ||
232 | # define REGFMT "0x%08lx" | ||
233 | #endif | ||
234 | |||
235 | /* | ||
236 | * Do some slow action (e.g. read a slow SPR). | ||
237 | * Note that this must also have compiler-barrier semantics since | ||
238 | * it may be used in a busy loop reading memory. | ||
239 | */ | ||
240 | static inline void cpu_relax(void) | ||
241 | { | ||
242 | __insn_mfspr(SPR_PASS); | ||
243 | barrier(); | ||
244 | } | ||
245 | |||
246 | struct siginfo; | ||
247 | extern void arch_coredump_signal(struct siginfo *, struct pt_regs *); | ||
248 | #define arch_coredump_signal arch_coredump_signal | ||
249 | |||
250 | /* Info on this processor (see fs/proc/cpuinfo.c) */ | ||
251 | struct seq_operations; | ||
252 | extern const struct seq_operations cpuinfo_op; | ||
253 | |||
254 | /* Provide information about the chip model. */ | ||
255 | extern char chip_model[64]; | ||
256 | |||
257 | /* Data on which physical memory controller corresponds to which NUMA node. */ | ||
258 | extern int node_controller[]; | ||
259 | |||
260 | |||
261 | /* Do we dump information to the console when a user application crashes? */ | ||
262 | extern int show_crashinfo; | ||
263 | |||
264 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
265 | /* Does the heap allocator return hash-for-home pages by default? */ | ||
266 | extern int hash_default; | ||
267 | |||
268 | /* Should kernel stack pages be hash-for-home? */ | ||
269 | extern int kstack_hash; | ||
270 | |||
271 | /* Does MAP_ANONYMOUS return hash-for-home pages by default? */ | ||
272 | #define uheap_hash hash_default | ||
273 | |||
274 | #else | ||
275 | #define hash_default 0 | ||
276 | #define kstack_hash 0 | ||
277 | #define uheap_hash 0 | ||
278 | #endif | ||
279 | |||
280 | /* Are we using huge pages in the TLB for kernel data? */ | ||
281 | extern int kdata_huge; | ||
282 | |||
283 | #define PREFETCH_STRIDE CHIP_L2_LINE_SIZE() | ||
284 | |||
285 | #else /* __ASSEMBLY__ */ | ||
286 | |||
287 | /* Do some slow action (e.g. read a slow SPR). */ | ||
288 | #define CPU_RELAX mfspr zero, SPR_PASS | ||
289 | |||
290 | #endif /* !__ASSEMBLY__ */ | ||
291 | |||
292 | /* Assembly code assumes that the PL is in the low bits. */ | ||
293 | #if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0 | ||
294 | # error Fix assembly assumptions about PL | ||
295 | #endif | ||
296 | |||
297 | /* We sometimes use these macros for EX_CONTEXT_0_1 as well. */ | ||
298 | #if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \ | ||
299 | SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \ | ||
300 | SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \ | ||
301 | SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK | ||
302 | # error Fix assumptions that EX1 macros work for both PL0 and PL1 | ||
303 | #endif | ||
304 | |||
305 | /* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */ | ||
306 | #define EX1_PL(ex1) \ | ||
307 | (((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK) | ||
308 | #define EX1_ICS(ex1) \ | ||
309 | (((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK) | ||
310 | #define PL_ICS_EX1(pl, ics) \ | ||
311 | (((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \ | ||
312 | ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT)) | ||
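A round-trip sketch of these macros under assumed field positions: PL in bits [1:0] (shift 0, as the #error check above requires) and ICS in bit 2; the real shifts and masks come from <arch/spr_def.h>:

    #include <assert.h>

    #define PL_SHIFT  0    /* required to be 0 by the #error check above */
    #define PL_RMASK  3    /* assumed two-bit PL field                   */
    #define ICS_SHIFT 2    /* assumed single ICS bit just above PL       */
    #define ICS_RMASK 1

    #define EX1_PL(ex1)         (((ex1) >> PL_SHIFT) & PL_RMASK)
    #define EX1_ICS(ex1)        (((ex1) >> ICS_SHIFT) & ICS_RMASK)
    #define PL_ICS_EX1(pl, ics) (((pl) << PL_SHIFT) | ((ics) << ICS_SHIFT))

    int main(void)
    {
        unsigned long ex1 = PL_ICS_EX1(1 /* KERNEL_PL */, 1 /* ICS set */);
        assert(EX1_PL(ex1) == 1 && EX1_ICS(ex1) == 1);
        return 0;
    }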
313 | |||
314 | /* | ||
315 | * Provide symbolic constants for PLs. | ||
316 | * Note that assembly code assumes that USER_PL is zero. | ||
317 | */ | ||
318 | #define USER_PL 0 | ||
319 | #define KERNEL_PL 1 | ||
320 | |||
321 | /* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */ | ||
322 | #define CPU_LOG_MASK_VALUE 12 | ||
323 | #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1) | ||
324 | #if CONFIG_NR_CPUS > CPU_MASK_VALUE | ||
325 | # error Too many cpus! | ||
326 | #endif | ||
327 | #define raw_smp_processor_id() \ | ||
328 | ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE) | ||
329 | #define get_current_ksp0() \ | ||
330 | (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE) | ||
331 | #define next_current_ksp0(task) ({ \ | ||
332 | unsigned long __ksp0 = task_ksp0(task); \ | ||
333 | int __cpu = raw_smp_processor_id(); \ | ||
334 | BUG_ON(__ksp0 & CPU_MASK_VALUE); \ | ||
335 | __ksp0 | __cpu; \ | ||
336 | }) | ||
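A userspace sketch of the SYSTEM_SAVE_1_0 packing described above: the cpu number lives in the low 12 bits and ksp0 (which must be at least 4KB-aligned, as the BUG_ON checks) in the remaining bits; the ksp0 value below is hypothetical:

    #include <assert.h>

    #define CPU_LOG_MASK_VALUE 12
    #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)

    int main(void)
    {
        unsigned long ksp0 = 0xfe108000UL;   /* hypothetical, 4KB-aligned stack top */
        int cpu = 37;
        unsigned long spr = ksp0 | cpu;      /* what next_current_ksp0() builds     */

        assert((int)(spr & CPU_MASK_VALUE) == cpu);              /* raw_smp_processor_id() */
        assert((spr & ~(unsigned long)CPU_MASK_VALUE) == ksp0);  /* get_current_ksp0()     */
        return 0;
    }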
337 | |||
338 | #endif /* _ASM_TILE_PROCESSOR_H */ | ||
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h new file mode 100644 index 00000000000..acdae814e01 --- /dev/null +++ b/arch/tile/include/asm/ptrace.h | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PTRACE_H | ||
16 | #define _ASM_TILE_PTRACE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | #include <arch/abi.h> | ||
20 | |||
21 | /* These must match struct pt_regs, below. */ | ||
22 | #if CHIP_WORD_SIZE() == 32 | ||
23 | #define PTREGS_OFFSET_REG(n) ((n)*4) | ||
24 | #else | ||
25 | #define PTREGS_OFFSET_REG(n) ((n)*8) | ||
26 | #endif | ||
27 | #define PTREGS_OFFSET_BASE 0 | ||
28 | #define PTREGS_OFFSET_TP PTREGS_OFFSET_REG(53) | ||
29 | #define PTREGS_OFFSET_SP PTREGS_OFFSET_REG(54) | ||
30 | #define PTREGS_OFFSET_LR PTREGS_OFFSET_REG(55) | ||
31 | #define PTREGS_NR_GPRS 56 | ||
32 | #define PTREGS_OFFSET_PC PTREGS_OFFSET_REG(56) | ||
33 | #define PTREGS_OFFSET_EX1 PTREGS_OFFSET_REG(57) | ||
34 | #define PTREGS_OFFSET_FAULTNUM PTREGS_OFFSET_REG(58) | ||
35 | #define PTREGS_OFFSET_ORIG_R0 PTREGS_OFFSET_REG(59) | ||
36 | #define PTREGS_OFFSET_FLAGS PTREGS_OFFSET_REG(60) | ||
37 | #if CHIP_HAS_CMPEXCH() | ||
38 | #define PTREGS_OFFSET_CMPEXCH PTREGS_OFFSET_REG(61) | ||
39 | #endif | ||
40 | #define PTREGS_SIZE PTREGS_OFFSET_REG(64) | ||
41 | |||
42 | #ifndef __ASSEMBLY__ | ||
43 | |||
44 | #ifdef __KERNEL__ | ||
45 | /* Benefit from consistent use of "long" on all chips. */ | ||
46 | typedef unsigned long pt_reg_t; | ||
47 | #else | ||
48 | /* Provide appropriate length type to userspace regardless of -m32/-m64. */ | ||
49 | typedef uint_reg_t pt_reg_t; | ||
50 | #endif | ||
51 | |||
52 | /* | ||
53 | * This struct defines the way the registers are stored on the stack during a | ||
54 | * system call/exception. It should be a multiple of 8 bytes to preserve | ||
55 | * normal stack alignment rules. | ||
56 | * | ||
57 | * Must track <sys/ucontext.h> and <sys/procfs.h> | ||
58 | */ | ||
59 | struct pt_regs { | ||
60 | /* Saved main processor registers; 56..63 are special. */ | ||
61 | /* tp, sp, and lr must immediately follow regs[] for aliasing. */ | ||
62 | pt_reg_t regs[53]; | ||
63 | pt_reg_t tp; /* aliases regs[TREG_TP] */ | ||
64 | pt_reg_t sp; /* aliases regs[TREG_SP] */ | ||
65 | pt_reg_t lr; /* aliases regs[TREG_LR] */ | ||
66 | |||
67 | /* Saved special registers. */ | ||
68 | pt_reg_t pc; /* stored in EX_CONTEXT_1_0 */ | ||
69 | pt_reg_t ex1; /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */ | ||
70 | pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */ | ||
71 | pt_reg_t orig_r0; /* r0 at syscall entry, else zero */ | ||
72 | pt_reg_t flags; /* flags (see below) */ | ||
73 | #if !CHIP_HAS_CMPEXCH() | ||
74 | pt_reg_t pad[3]; | ||
75 | #else | ||
76 | pt_reg_t cmpexch; /* value of CMPEXCH_VALUE SPR at interrupt */ | ||
77 | pt_reg_t pad[2]; | ||
78 | #endif | ||
79 | }; | ||
80 | |||
81 | #endif /* __ASSEMBLY__ */ | ||
82 | |||
83 | /* Flag bits in pt_regs.flags */ | ||
84 | #define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ | ||
85 | #define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */ | ||
86 | #define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */ | ||
87 | |||
88 | #define PTRACE_GETREGS 12 | ||
89 | #define PTRACE_SETREGS 13 | ||
90 | #define PTRACE_GETFPREGS 14 | ||
91 | #define PTRACE_SETFPREGS 15 | ||
92 | |||
93 | /* Support TILE-specific ptrace options, with events starting at 16. */ | ||
94 | #define PTRACE_O_TRACEMIGRATE 0x00010000 | ||
95 | #define PTRACE_EVENT_MIGRATE 16 | ||
96 | #ifdef __KERNEL__ | ||
97 | #define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE) | ||
98 | #define PT_TRACE_MIGRATE 0x00080000 | ||
99 | #define PT_TRACE_MASK_TILE (PT_TRACE_MIGRATE) | ||
100 | #endif | ||
101 | |||
102 | #ifdef __KERNEL__ | ||
103 | |||
104 | #ifndef __ASSEMBLY__ | ||
105 | |||
106 | #define instruction_pointer(regs) ((regs)->pc) | ||
107 | #define profile_pc(regs) instruction_pointer(regs) | ||
108 | |||
109 | /* Should the process's time be accounted as user time or as system time? */ | ||
110 | #define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL) | ||
111 | |||
112 | /* Fill in a struct pt_regs with the current kernel registers. */ | ||
113 | struct pt_regs *get_pt_regs(struct pt_regs *); | ||
114 | |||
115 | /* Trace the current syscall. */ | ||
116 | extern void do_syscall_trace(void); | ||
117 | |||
118 | extern void show_regs(struct pt_regs *); | ||
119 | |||
120 | #define arch_has_single_step() (1) | ||
121 | |||
122 | /* | ||
123 | * A structure for all single-stepper state. | ||
124 | * | ||
125 | * Also update the defines in the assembler section if this struct changes. | ||
126 | */ | ||
127 | struct single_step_state { | ||
128 | /* the page to which we will write hacked-up bundles */ | ||
129 | void __user *buffer; | ||
130 | |||
131 | union { | ||
132 | int flags; | ||
133 | struct { | ||
134 | unsigned long is_enabled:1, update:1, update_reg:6; | ||
135 | }; | ||
136 | }; | ||
137 | |||
138 | unsigned long orig_pc; /* the original PC */ | ||
139 | unsigned long next_pc; /* return PC if no branch (PC + 1) */ | ||
140 | unsigned long branch_next_pc; /* return PC if we did branch/jump */ | ||
141 | unsigned long update_value; /* value to restore to update_target */ | ||
142 | }; | ||
143 | |||
144 | /* Single-step the instruction at regs->pc */ | ||
145 | extern void single_step_once(struct pt_regs *regs); | ||
146 | |||
147 | struct task_struct; | ||
148 | |||
149 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | ||
150 | int error_code); | ||
151 | |||
152 | #ifdef __tilegx__ | ||
153 | /* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */ | ||
154 | #define __ARCH_WANT_COMPAT_SYS_PTRACE | ||
155 | #endif | ||
156 | |||
157 | #endif /* !__ASSEMBLY__ */ | ||
158 | |||
159 | #define SINGLESTEP_STATE_MASK_IS_ENABLED 0x1 | ||
160 | #define SINGLESTEP_STATE_MASK_UPDATE 0x2 | ||
161 | #define SINGLESTEP_STATE_TARGET_LB 2 | ||
162 | #define SINGLESTEP_STATE_TARGET_UB 7 | ||
163 | |||
164 | #endif /* __KERNEL__ */ | ||
165 | |||
166 | #endif /* _ASM_TILE_PTRACE_H */ | ||
diff --git a/arch/tile/include/asm/resource.h b/arch/tile/include/asm/resource.h new file mode 100644 index 00000000000..04bc4db8921 --- /dev/null +++ b/arch/tile/include/asm/resource.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/resource.h> | |||
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h new file mode 100644 index 00000000000..c5604242c0d --- /dev/null +++ b/arch/tile/include/asm/scatterlist.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SCATTERLIST_H | ||
16 | #define _ASM_TILE_SCATTERLIST_H | ||
17 | |||
18 | #define ISA_DMA_THRESHOLD (~0UL) | ||
19 | |||
20 | #include <asm-generic/scatterlist.h> | ||
21 | |||
22 | #endif /* _ASM_TILE_SCATTERLIST_H */ | ||
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h new file mode 100644 index 00000000000..d062d463fca --- /dev/null +++ b/arch/tile/include/asm/sections.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SECTIONS_H | ||
16 | #define _ASM_TILE_SECTIONS_H | ||
17 | |||
18 | #define arch_is_kernel_data arch_is_kernel_data | ||
19 | |||
20 | #include <asm-generic/sections.h> | ||
21 | |||
22 | /* Text and data are at different areas in the kernel VA space. */ | ||
23 | extern char _sinitdata[], _einitdata[]; | ||
24 | |||
25 | /* Write-once data is writable only until the end of initialization. */ | ||
26 | extern char __w1data_begin[], __w1data_end[]; | ||
27 | |||
28 | |||
29 | /* Not exactly sections, but PC comparison points in the code. */ | ||
30 | extern char __rt_sigreturn[], __rt_sigreturn_end[]; | ||
31 | #ifndef __tilegx__ | ||
32 | extern char sys_cmpxchg[], __sys_cmpxchg_end[]; | ||
33 | extern char __sys_cmpxchg_grab_lock[]; | ||
34 | extern char __start_atomic_asm_code[], __end_atomic_asm_code[]; | ||
35 | #endif | ||
36 | |||
37 | /* Handle the discontiguity between _sdata and _stext. */ | ||
38 | static inline int arch_is_kernel_data(unsigned long addr) | ||
39 | { | ||
40 | return addr >= (unsigned long)_sdata && | ||
41 | addr < (unsigned long)_end; | ||
42 | } | ||
43 | |||
44 | #endif /* _ASM_TILE_SECTIONS_H */ | ||
diff --git a/arch/tile/include/asm/sembuf.h b/arch/tile/include/asm/sembuf.h new file mode 100644 index 00000000000..7673b83cfef --- /dev/null +++ b/arch/tile/include/asm/sembuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sembuf.h> | |||
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h new file mode 100644 index 00000000000..823ddd47ff6 --- /dev/null +++ b/arch/tile/include/asm/setup.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SETUP_H | ||
16 | #define _ASM_TILE_SETUP_H | ||
17 | |||
18 | #include <linux/pfn.h> | ||
19 | #include <linux/init.h> | ||
20 | |||
21 | /* | ||
22 | * Reserved space for vmalloc and iomap - defined in asm/page.h | ||
23 | */ | ||
24 | #define MAXMEM_PFN PFN_DOWN(MAXMEM) | ||
25 | |||
26 | #define COMMAND_LINE_SIZE 2048 | ||
27 | |||
28 | void early_panic(const char *fmt, ...); | ||
29 | void warn_early_printk(void); | ||
30 | void __init disable_early_printk(void); | ||
31 | |||
32 | #endif /* _ASM_TILE_SETUP_H */ | ||
diff --git a/arch/tile/include/asm/shmbuf.h b/arch/tile/include/asm/shmbuf.h new file mode 100644 index 00000000000..83c05fc2de3 --- /dev/null +++ b/arch/tile/include/asm/shmbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmbuf.h> | |||
diff --git a/arch/tile/include/asm/shmparam.h b/arch/tile/include/asm/shmparam.h new file mode 100644 index 00000000000..93f30deb95d --- /dev/null +++ b/arch/tile/include/asm/shmparam.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmparam.h> | |||
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h new file mode 100644 index 00000000000..7cd7672e3ad --- /dev/null +++ b/arch/tile/include/asm/sigcontext.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGCONTEXT_H | ||
16 | #define _ASM_TILE_SIGCONTEXT_H | ||
17 | |||
18 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | ||
19 | #include <asm/ptrace.h> | ||
20 | |||
21 | /* Must track <sys/ucontext.h> */ | ||
22 | |||
23 | struct sigcontext { | ||
24 | struct pt_regs regs; | ||
25 | }; | ||
26 | |||
27 | #endif /* _ASM_TILE_SIGCONTEXT_H */ | ||
diff --git a/arch/tile/include/asm/sigframe.h b/arch/tile/include/asm/sigframe.h new file mode 100644 index 00000000000..994d3d30205 --- /dev/null +++ b/arch/tile/include/asm/sigframe.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGFRAME_H | ||
16 | #define _ASM_TILE_SIGFRAME_H | ||
17 | |||
18 | /* Indicate that syscall return should not examine r0 */ | ||
19 | #define INT_SWINT_1_SIGRETURN (~0) | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | #include <arch/abi.h> | ||
24 | |||
25 | struct rt_sigframe { | ||
26 | unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */ | ||
27 | struct siginfo info; | ||
28 | struct ucontext uc; | ||
29 | }; | ||
30 | |||
31 | #endif /* !__ASSEMBLY__ */ | ||
32 | |||
33 | #endif /* _ASM_TILE_SIGFRAME_H */ | ||
diff --git a/arch/tile/include/asm/siginfo.h b/arch/tile/include/asm/siginfo.h new file mode 100644 index 00000000000..0c12d1b9ddf --- /dev/null +++ b/arch/tile/include/asm/siginfo.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGINFO_H | ||
16 | #define _ASM_TILE_SIGINFO_H | ||
17 | |||
18 | #define __ARCH_SI_TRAPNO | ||
19 | |||
20 | #include <asm-generic/siginfo.h> | ||
21 | |||
22 | /* | ||
23 | * Additional Tile-specific SIGILL si_codes | ||
24 | */ | ||
25 | #define ILL_DBLFLT (__SI_FAULT|9) /* double fault */ | ||
26 | #define ILL_HARDWALL (__SI_FAULT|10) /* user networks hardwall violation */ | ||
27 | #undef NSIGILL | ||
28 | #define NSIGILL 10 | ||
29 | |||
30 | #endif /* _ASM_TILE_SIGINFO_H */ | ||
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h new file mode 100644 index 00000000000..eb0253f3220 --- /dev/null +++ b/arch/tile/include/asm/signal.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGNAL_H | ||
16 | #define _ASM_TILE_SIGNAL_H | ||
17 | |||
18 | /* Do not notify a ptracer when this signal is handled. */ | ||
19 | #define SA_NOPTRACE 0x02000000u | ||
20 | |||
21 | /* Used in earlier Tilera releases, so kept for binary compatibility. */ | ||
22 | #define SA_RESTORER 0x04000000u | ||
23 | |||
24 | #include <asm-generic/signal.h> | ||
25 | |||
26 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | ||
27 | int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); | ||
28 | int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); | ||
29 | void do_signal(struct pt_regs *regs); | ||
30 | #endif | ||
31 | |||
32 | #endif /* _ASM_TILE_SIGNAL_H */ | ||
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h new file mode 100644 index 00000000000..532124ae4b1 --- /dev/null +++ b/arch/tile/include/asm/smp.h | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SMP_H | ||
16 | #define _ASM_TILE_SMP_H | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | |||
20 | #include <asm/processor.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/irqreturn.h> | ||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | /* Set up this tile to support receiving hypervisor messages */ | ||
26 | void init_messaging(void); | ||
27 | |||
28 | /* Set up this tile to support receiving device interrupts and IPIs. */ | ||
29 | void init_per_tile_IRQs(void); | ||
30 | |||
31 | /* Send a message to processors specified in mask */ | ||
32 | void send_IPI_many(const struct cpumask *mask, int tag); | ||
33 | |||
34 | /* Send a message to all but the sending processor */ | ||
35 | void send_IPI_allbutself(int tag); | ||
36 | |||
37 | /* Send a message to a specific processor */ | ||
38 | void send_IPI_single(int dest, int tag); | ||
39 | |||
40 | /* Process an IPI message */ | ||
41 | void evaluate_message(int tag); | ||
42 | |||
43 | /* Boot a secondary cpu */ | ||
44 | void online_secondary(void); | ||
45 | |||
46 | /* Call a function on a specified set of CPUs (may include this one). */ | ||
47 | extern void on_each_cpu_mask(const struct cpumask *mask, | ||
48 | void (*func)(void *), void *info, bool wait); | ||
49 | |||
50 | /* Topology of the supervisor tile grid, and coordinates of boot processor */ | ||
51 | extern HV_Topology smp_topology; | ||
52 | |||
53 | /* Accessors for grid size */ | ||
54 | #define smp_height (smp_topology.height) | ||
55 | #define smp_width (smp_topology.width) | ||
56 | |||
57 | /* Convenience functions for converting cpu <-> coords. */ | ||
58 | static inline int cpu_x(int cpu) | ||
59 | { | ||
60 | return cpu % smp_width; | ||
61 | } | ||
62 | static inline int cpu_y(int cpu) | ||
63 | { | ||
64 | return cpu / smp_width; | ||
65 | } | ||
66 | static inline int xy_to_cpu(int x, int y) | ||
67 | { | ||
68 | return y * smp_width + x; | ||
69 | } | ||
70 | |||
71 | /* Hypervisor message tags sent via the tile send_IPI*() routines. */ | ||
72 | #define MSG_TAG_START_CPU 1 | ||
73 | #define MSG_TAG_STOP_CPU 2 | ||
74 | #define MSG_TAG_CALL_FUNCTION_MANY 3 | ||
75 | #define MSG_TAG_CALL_FUNCTION_SINGLE 4 | ||
76 | |||
77 | /* Hook for the generic smp_call_function_many() routine. */ | ||
78 | static inline void arch_send_call_function_ipi_mask(struct cpumask *mask) | ||
79 | { | ||
80 | send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY); | ||
81 | } | ||
82 | |||
83 | /* Hook for the generic smp_call_function_single() routine. */ | ||
84 | static inline void arch_send_call_function_single_ipi(int cpu) | ||
85 | { | ||
86 | send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE); | ||
87 | } | ||
88 | |||
89 | /* Print out the boot string describing which cpus were disabled. */ | ||
90 | void print_disabled_cpus(void); | ||
91 | |||
92 | #else /* !CONFIG_SMP */ | ||
93 | |||
94 | #define on_each_cpu_mask(mask, func, info, wait) \ | ||
95 | do { if (cpumask_test_cpu(0, (mask))) func(info); } while (0) | ||
96 | |||
97 | #define smp_master_cpu 0 | ||
98 | #define smp_height 1 | ||
99 | #define smp_width 1 | ||
100 | #define cpu_x(cpu) 0 | ||
101 | #define cpu_y(cpu) 0 | ||
102 | #define xy_to_cpu(x, y) 0 | ||
103 | |||
104 | #endif /* !CONFIG_SMP */ | ||
105 | |||
106 | |||
107 | /* Which cpus may be used as the lotar in a page table entry. */ | ||
108 | extern struct cpumask cpu_lotar_map; | ||
109 | #define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map) | ||
110 | |||
111 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
112 | /* Which processors are used for hash-for-home mapping */ | ||
113 | extern struct cpumask hash_for_home_map; | ||
114 | #endif | ||
115 | |||
116 | /* Which cpus can have their cache flushed by hv_flush_remote(). */ | ||
117 | extern struct cpumask cpu_cacheable_map; | ||
118 | #define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map) | ||
119 | |||
120 | /* Convert an HV_LOTAR value into a cpu. */ | ||
121 | static inline int hv_lotar_to_cpu(HV_LOTAR lotar) | ||
122 | { | ||
123 | return HV_LOTAR_X(lotar) + (HV_LOTAR_Y(lotar) * smp_width); | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Extension of <linux/cpumask.h> functionality when you just want | ||
128 | * to express a mask, suppression region, or inclusion region without | ||
129 | * being too concerned about exactly which cpus are valid in that region. | ||
130 | */ | ||
131 | int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits); | ||
132 | |||
133 | #define cpulist_parse_crop(buf, dst) \ | ||
134 | __cpulist_parse_crop((buf), (dst), NR_CPUS) | ||
135 | static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp, | ||
136 | int nbits) | ||
137 | { | ||
138 | return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits); | ||
139 | } | ||
140 | |||
141 | /* Initialize the IPI subsystem. */ | ||
142 | void ipi_init(void); | ||
143 | |||
144 | /* Function for start-cpu message to cause us to jump to. */ | ||
145 | extern unsigned long start_cpu_function_addr; | ||
146 | |||
147 | #endif /* _ASM_TILE_SMP_H */ | ||
diff --git a/arch/tile/include/asm/socket.h b/arch/tile/include/asm/socket.h new file mode 100644 index 00000000000..6b71384b9d8 --- /dev/null +++ b/arch/tile/include/asm/socket.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/socket.h> | |||
diff --git a/arch/tile/include/asm/sockios.h b/arch/tile/include/asm/sockios.h new file mode 100644 index 00000000000..def6d4746ee --- /dev/null +++ b/arch/tile/include/asm/sockios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sockios.h> | |||
diff --git a/arch/tile/include/asm/spinlock.h b/arch/tile/include/asm/spinlock.h new file mode 100644 index 00000000000..1a8bd4740c2 --- /dev/null +++ b/arch/tile/include/asm/spinlock.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SPINLOCK_H | ||
16 | #define _ASM_TILE_SPINLOCK_H | ||
17 | |||
18 | #ifdef __tilegx__ | ||
19 | #include <asm/spinlock_64.h> | ||
20 | #else | ||
21 | #include <asm/spinlock_32.h> | ||
22 | #endif | ||
23 | |||
24 | #endif /* _ASM_TILE_SPINLOCK_H */ | ||
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h new file mode 100644 index 00000000000..88efdde8dd2 --- /dev/null +++ b/arch/tile/include/asm/spinlock_32.h | |||
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * 32-bit SMP spinlocks. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_SPINLOCK_32_H | ||
18 | #define _ASM_TILE_SPINLOCK_32_H | ||
19 | |||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <asm/system.h> | ||
23 | #include <linux/compiler.h> | ||
24 | |||
25 | /* | ||
26 | * We only use even ticket numbers so the '1' inserted by a tns is | ||
27 | * an unambiguous "ticket is busy" flag. | ||
28 | */ | ||
29 | #define TICKET_QUANTUM 2 | ||
30 | |||
31 | |||
32 | /* | ||
33 | * SMP ticket spinlocks, allowing only a single CPU anywhere | ||
34 | * | ||
35 | * (the type definitions are in asm/spinlock_types.h) | ||
36 | */ | ||
37 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
38 | { | ||
39 | /* | ||
40 | * Note that even if a new ticket is in the process of being | ||
41 | * acquired (so lock->next_ticket is 1), it's still reasonable | ||
42 | * to claim the lock is held, since it will be momentarily | ||
43 | * if not already. There's no need to wait for a "valid" | ||
44 | * lock->next_ticket to become available. | ||
45 | */ | ||
46 | return lock->next_ticket != lock->current_ticket; | ||
47 | } | ||
48 | |||
49 | void arch_spin_lock(arch_spinlock_t *lock); | ||
50 | |||
51 | /* We cannot take an interrupt after getting a ticket, so don't enable them. */ | ||
52 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
53 | |||
54 | int arch_spin_trylock(arch_spinlock_t *lock); | ||
55 | |||
56 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
57 | { | ||
58 | /* For efficiency, overlap fetching the old ticket with the wmb(). */ | ||
59 | int old_ticket = lock->current_ticket; | ||
60 | wmb(); /* guarantee anything modified under the lock is visible */ | ||
61 | lock->current_ticket = old_ticket + TICKET_QUANTUM; | ||
62 | } | ||
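The even-ticket scheme above can be modelled in a few lines of plain C; this toy (userspace, single-threaded, no tns and no atomicity) only illustrates why next_ticket != current_ticket means "held" and why unlock advances by TICKET_QUANTUM:

    #include <assert.h>

    #define TICKET_QUANTUM 2

    struct toy_lock { int next_ticket, current_ticket; };

    int main(void)
    {
        struct toy_lock lock = { 0, 0 };       /* like __ARCH_SPIN_LOCK_UNLOCKED */

        assert(lock.next_ticket == lock.current_ticket);  /* unlocked           */
        lock.next_ticket += TICKET_QUANTUM;                /* take a ticket      */
        assert(lock.next_ticket != lock.current_ticket);  /* now reads as held  */
        lock.current_ticket += TICKET_QUANTUM;             /* arch_spin_unlock() */
        assert(lock.next_ticket == lock.current_ticket);  /* unlocked again     */
        return 0;
    }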
63 | |||
64 | void arch_spin_unlock_wait(arch_spinlock_t *lock); | ||
65 | |||
66 | /* | ||
67 | * Read-write spinlocks, allowing multiple readers | ||
68 | * but only one writer. | ||
69 | * | ||
70 | * We use a "tns/store-back" technique on a single word to manage | ||
71 | * the lock state, looping around to retry if the tns returns 1. | ||
72 | */ | ||
73 | |||
74 | /* Internal layout of the word; do not use. */ | ||
75 | #define _WR_NEXT_SHIFT 8 | ||
76 | #define _WR_CURR_SHIFT 16 | ||
77 | #define _WR_WIDTH 8 | ||
78 | #define _RD_COUNT_SHIFT 24 | ||
79 | #define _RD_COUNT_WIDTH 8 | ||
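To see how this single-word layout is used by the fast paths below, here is a standalone sketch (bit 0: tns busy flag, bits 8-15: writer "next", bits 16-23: writer "current", bits 24-31: reader count); the left shift discards the reader count, which is why existing readers do not block a new reader:

    #include <assert.h>
    #include <stdint.h>

    #define _WR_NEXT_SHIFT  8
    #define _RD_COUNT_SHIFT 24
    #define _RD_COUNT_WIDTH 8

    int main(void)
    {
        uint32_t lock;

        lock = 3u << _RD_COUNT_SHIFT;            /* three readers, no writer   */
        assert((lock << _RD_COUNT_WIDTH) == 0);  /* arch_read_can_lock(): yes  */
        assert(lock != 0);                       /* arch_write_can_lock(): no  */

        lock = 1u << _WR_NEXT_SHIFT;             /* a writer owns the lock     */
        assert((lock << _RD_COUNT_WIDTH) != 0);  /* arch_read_can_lock(): no   */
        return 0;
    }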
80 | |||
81 | /* Internal functions; do not use. */ | ||
82 | void arch_read_lock_slow(arch_rwlock_t *, u32); | ||
83 | int arch_read_trylock_slow(arch_rwlock_t *); | ||
84 | void arch_read_unlock_slow(arch_rwlock_t *); | ||
85 | void arch_write_lock_slow(arch_rwlock_t *, u32); | ||
86 | void arch_write_unlock_slow(arch_rwlock_t *, u32); | ||
87 | |||
88 | /** | ||
89 | * arch_read_can_lock() - would read_trylock() succeed? | ||
90 | */ | ||
91 | static inline int arch_read_can_lock(arch_rwlock_t *rwlock) | ||
92 | { | ||
93 | return (rwlock->lock << _RD_COUNT_WIDTH) == 0; | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * arch_write_can_lock() - would write_trylock() succeed? | ||
98 | */ | ||
99 | static inline int arch_write_can_lock(arch_rwlock_t *rwlock) | ||
100 | { | ||
101 | return rwlock->lock == 0; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * arch_read_lock() - acquire a read lock. | ||
106 | */ | ||
107 | static inline void arch_read_lock(arch_rwlock_t *rwlock) | ||
108 | { | ||
109 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
110 | if (unlikely(val << _RD_COUNT_WIDTH)) { | ||
111 | arch_read_lock_slow(rwlock, val); | ||
112 | return; | ||
113 | } | ||
114 | rwlock->lock = val + (1 << _RD_COUNT_SHIFT); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * arch_write_lock() - acquire a write lock. | ||
119 | */ | ||
120 | static inline void arch_write_lock(arch_rwlock_t *rwlock) | ||
121 | { | ||
122 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
123 | if (unlikely(val != 0)) { | ||
124 | arch_write_lock_slow(rwlock, val); | ||
125 | return; | ||
126 | } | ||
127 | rwlock->lock = 1 << _WR_NEXT_SHIFT; | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * arch_read_trylock() - try to acquire a read lock. | ||
132 | */ | ||
133 | static inline int arch_read_trylock(arch_rwlock_t *rwlock) | ||
134 | { | ||
135 | int locked; | ||
136 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
137 | if (unlikely(val & 1)) | ||
138 | return arch_read_trylock_slow(rwlock); | ||
139 | locked = (val << _RD_COUNT_WIDTH) == 0; | ||
140 | rwlock->lock = val + (locked << _RD_COUNT_SHIFT); | ||
141 | return locked; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * arch_write_trylock() - try to acquire a write lock. | ||
146 | */ | ||
147 | static inline int arch_write_trylock(arch_rwlock_t *rwlock) | ||
148 | { | ||
149 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
150 | |||
151 | /* | ||
152 | * If a tns is in progress, or there's a waiting or active locker, | ||
153 | * or active readers, we can't take the lock, so give up. | ||
154 | */ | ||
155 | if (unlikely(val != 0)) { | ||
156 | if (!(val & 1)) | ||
157 | rwlock->lock = val; | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /* Set the "next" field to mark it locked. */ | ||
162 | rwlock->lock = 1 << _WR_NEXT_SHIFT; | ||
163 | return 1; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * arch_read_unlock() - release a read lock. | ||
168 | */ | ||
169 | static inline void arch_read_unlock(arch_rwlock_t *rwlock) | ||
170 | { | ||
171 | u32 val; | ||
172 | mb(); /* guarantee anything modified under the lock is visible */ | ||
173 | val = __insn_tns((int *)&rwlock->lock); | ||
174 | if (unlikely(val & 1)) { | ||
175 | arch_read_unlock_slow(rwlock); | ||
176 | return; | ||
177 | } | ||
178 | rwlock->lock = val - (1 << _RD_COUNT_SHIFT); | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * arch_write_unlock() - release a write lock. | ||
183 | */ | ||
184 | static inline void arch_write_unlock(arch_rwlock_t *rwlock) | ||
185 | { | ||
186 | u32 val; | ||
187 | mb(); /* guarantee anything modified under the lock is visible */ | ||
188 | val = __insn_tns((int *)&rwlock->lock); | ||
189 | if (unlikely(val != (1 << _WR_NEXT_SHIFT))) { | ||
190 | arch_write_unlock_slow(rwlock, val); | ||
191 | return; | ||
192 | } | ||
193 | rwlock->lock = 0; | ||
194 | } | ||
195 | |||
196 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
197 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
198 | |||
199 | #endif /* _ASM_TILE_SPINLOCK_32_H */ | ||
diff --git a/arch/tile/include/asm/spinlock_types.h b/arch/tile/include/asm/spinlock_types.h new file mode 100644 index 00000000000..a71f59b49c5 --- /dev/null +++ b/arch/tile/include/asm/spinlock_types.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SPINLOCK_TYPES_H | ||
16 | #define _ASM_TILE_SPINLOCK_TYPES_H | ||
17 | |||
18 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
19 | # error "please don't include this file directly" | ||
20 | #endif | ||
21 | |||
22 | #ifdef __tilegx__ | ||
23 | |||
24 | /* Low 15 bits are "next"; high 15 bits are "current". */ | ||
25 | typedef struct arch_spinlock { | ||
26 | unsigned int lock; | ||
27 | } arch_spinlock_t; | ||
28 | |||
29 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | ||
30 | |||
31 | /* High bit is "writer owns"; low 31 bits are a count of readers. */ | ||
32 | typedef struct arch_rwlock { | ||
33 | unsigned int lock; | ||
34 | } arch_rwlock_t; | ||
35 | |||
36 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
37 | |||
38 | #else | ||
39 | |||
40 | typedef struct arch_spinlock { | ||
41 | /* Next ticket number to hand out. */ | ||
42 | int next_ticket; | ||
43 | /* The ticket number that currently owns this lock. */ | ||
44 | int current_ticket; | ||
45 | } arch_spinlock_t; | ||
46 | |||
47 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0, 0 } | ||
48 | |||
49 | /* | ||
50 | * Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next", | ||
51 | * byte 2 for ticket-lock "current", byte 3 for reader count. | ||
52 | */ | ||
53 | typedef struct arch_rwlock { | ||
54 | unsigned int lock; | ||
55 | } arch_rwlock_t; | ||
56 | |||
57 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
58 | |||
59 | #endif | ||
60 | #endif /* _ASM_TILE_SPINLOCK_TYPES_H */ | ||
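To make the 32-bit rwlock word described above easier to follow, here is a small illustrative decoder. The shift values are assumptions taken from the byte layout in the comment (byte 3 holds the reader count, byte 1 the ticket-lock "next" field); the authoritative constants live in spinlock_32.h.

    /* Illustrative sketch only; constants assumed from the layout comment above. */
    #define EXAMPLE_RD_COUNT_SHIFT	24	/* byte 3: count of active readers */
    #define EXAMPLE_WR_NEXT_SHIFT	8	/* byte 1: ticket-lock "next" field */

    static inline unsigned int example_rwlock_readers(unsigned int lock)
    {
    	return lock >> EXAMPLE_RD_COUNT_SHIFT;	/* readers currently holding it */
    }

    static inline int example_rwlock_tns_busy(unsigned int lock)
    {
    	return lock & 1;			/* byte 0: a tns is in flight */
    }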
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h new file mode 100644 index 00000000000..f908473c322 --- /dev/null +++ b/arch/tile/include/asm/stack.h | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_STACK_H | ||
16 | #define _ASM_TILE_STACK_H | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <asm/backtrace.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | /* Everything we need to keep track of a backtrace iteration */ | ||
24 | struct KBacktraceIterator { | ||
25 | BacktraceIterator it; | ||
26 | struct task_struct *task; /* task we are backtracing */ | ||
27 | HV_PTE *pgtable; /* page table for user space access */ | ||
28 | int end; /* iteration complete. */ | ||
29 | int new_context; /* new context is starting */ | ||
30 | int profile; /* profiling, so stop on async intrpt */ | ||
31 | int verbose; /* printk extra info (don't want to | ||
32 | * do this for profiling) */ | ||
33 | int is_current; /* backtracing current task */ | ||
34 | }; | ||
35 | |||
36 | /* Iteration methods for kernel backtraces */ | ||
37 | |||
38 | /* | ||
39 | * Initialize a KBacktraceIterator from a task_struct, and optionally from | ||
40 | * a set of registers. If the registers are omitted, the process is | ||
41 | * assumed to be descheduled, and registers are read from the process's | ||
42 | * thread_struct and stack. "verbose" means to printk some additional | ||
43 | * information about fault handlers as we pass them on the stack. | ||
44 | */ | ||
45 | extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt, | ||
46 | struct task_struct *, struct pt_regs *); | ||
47 | |||
48 | /* Initialize iterator based on current stack. */ | ||
49 | extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt); | ||
50 | |||
51 | /* Helper method for above. */ | ||
52 | extern void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, | ||
53 | ulong pc, ulong lr, ulong sp, ulong r52); | ||
54 | |||
55 | /* No more frames? */ | ||
56 | extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt); | ||
57 | |||
58 | /* Advance to the next frame. */ | ||
59 | extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt); | ||
60 | |||
61 | /* | ||
62 | * Dump stack given complete register info. Use only from the | ||
63 | * architecture-specific code; show_stack() | ||
64 | * and dump_stack() (in entry.S) are architecture-independent entry points. | ||
65 | */ | ||
66 | extern void tile_show_stack(struct KBacktraceIterator *, int headers); | ||
67 | |||
68 | /* Dump stack of current process, with registers to seed the backtrace. */ | ||
69 | extern void dump_stack_regs(struct pt_regs *); | ||
70 | |||
71 | /* Helper method for assembly dump_stack(). */ | ||
72 | extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52); | ||
73 | |||
74 | #endif /* _ASM_TILE_STACK_H */ | ||
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h new file mode 100644 index 00000000000..3dc90fa92c7 --- /dev/null +++ b/arch/tile/include/asm/stat.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/stat.h> | |||
diff --git a/arch/tile/include/asm/statfs.h b/arch/tile/include/asm/statfs.h new file mode 100644 index 00000000000..0b91fe198c2 --- /dev/null +++ b/arch/tile/include/asm/statfs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/statfs.h> | |||
diff --git a/arch/tile/include/asm/string.h b/arch/tile/include/asm/string.h new file mode 100644 index 00000000000..7535cf1a30e --- /dev/null +++ b/arch/tile/include/asm/string.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_STRING_H | ||
16 | #define _ASM_TILE_STRING_H | ||
17 | |||
18 | #define __HAVE_ARCH_MEMCHR | ||
19 | #define __HAVE_ARCH_MEMSET | ||
20 | #define __HAVE_ARCH_MEMCPY | ||
21 | #define __HAVE_ARCH_MEMMOVE | ||
22 | #define __HAVE_ARCH_STRCHR | ||
23 | #define __HAVE_ARCH_STRLEN | ||
24 | |||
25 | extern __kernel_size_t strlen(const char *); | ||
26 | extern char *strchr(const char *s, int c); | ||
27 | extern void *memchr(const void *s, int c, size_t n); | ||
28 | extern void *memset(void *, int, __kernel_size_t); | ||
29 | extern void *memcpy(void *, const void *, __kernel_size_t); | ||
30 | extern void *memmove(void *, const void *, __kernel_size_t); | ||
31 | |||
32 | #endif /* _ASM_TILE_STRING_H */ | ||
diff --git a/arch/tile/include/asm/swab.h b/arch/tile/include/asm/swab.h new file mode 100644 index 00000000000..25c686a00f1 --- /dev/null +++ b/arch/tile/include/asm/swab.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SWAB_H | ||
16 | #define _ASM_TILE_SWAB_H | ||
17 | |||
18 | /* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */ | ||
19 | #define __arch_swab32(x) __builtin_bswap32(x) | ||
20 | #define __arch_swab64(x) __builtin_bswap64(x) | ||
21 | |||
22 | /* Use the variant that is natural for the wordsize. */ | ||
23 | #ifdef CONFIG_64BIT | ||
24 | #define __arch_swab16(x) (__builtin_bswap64(x) >> 48) | ||
25 | #else | ||
26 | #define __arch_swab16(x) (__builtin_bswap32(x) >> 16) | ||
27 | #endif | ||
28 | |||
29 | #endif /* _ASM_TILE_SWAB_H */ | ||
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h new file mode 100644 index 00000000000..d35e0dcb67b --- /dev/null +++ b/arch/tile/include/asm/syscall.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * See asm-generic/syscall.h for descriptions of what we must do here. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_SYSCALL_H | ||
19 | #define _ASM_TILE_SYSCALL_H | ||
20 | |||
21 | #include <linux/sched.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <arch/abi.h> | ||
24 | |||
25 | /* | ||
26 | * Only the low 32 bits of orig_r0 are meaningful, so we return int. | ||
27 | * This importantly ignores the high bits on 64-bit, so comparisons | ||
28 | * sign-extend the low 32 bits. | ||
29 | */ | ||
30 | static inline int syscall_get_nr(struct task_struct *t, struct pt_regs *regs) | ||
31 | { | ||
32 | return regs->regs[TREG_SYSCALL_NR]; | ||
33 | } | ||
34 | |||
35 | static inline void syscall_rollback(struct task_struct *task, | ||
36 | struct pt_regs *regs) | ||
37 | { | ||
38 | regs->regs[0] = regs->orig_r0; | ||
39 | } | ||
40 | |||
41 | static inline long syscall_get_error(struct task_struct *task, | ||
42 | struct pt_regs *regs) | ||
43 | { | ||
44 | unsigned long error = regs->regs[0]; | ||
45 | return IS_ERR_VALUE(error) ? error : 0; | ||
46 | } | ||
47 | |||
48 | static inline long syscall_get_return_value(struct task_struct *task, | ||
49 | struct pt_regs *regs) | ||
50 | { | ||
51 | return regs->regs[0]; | ||
52 | } | ||
53 | |||
54 | static inline void syscall_set_return_value(struct task_struct *task, | ||
55 | struct pt_regs *regs, | ||
56 | int error, long val) | ||
57 | { | ||
58 | regs->regs[0] = (long) error ?: val; | ||
59 | } | ||
60 | |||
61 | static inline void syscall_get_arguments(struct task_struct *task, | ||
62 | struct pt_regs *regs, | ||
63 | unsigned int i, unsigned int n, | ||
64 | unsigned long *args) | ||
65 | { | ||
66 | BUG_ON(i + n > 6); | ||
67 | memcpy(args, &regs->regs[i], n * sizeof(args[0])); | ||
68 | } | ||
69 | |||
70 | static inline void syscall_set_arguments(struct task_struct *task, | ||
71 | struct pt_regs *regs, | ||
72 | unsigned int i, unsigned int n, | ||
73 | const unsigned long *args) | ||
74 | { | ||
75 | BUG_ON(i + n > 6); | ||
76 | memcpy(&regs->regs[i], args, n * sizeof(args[0])); | ||
77 | } | ||
78 | |||
79 | #endif /* _ASM_TILE_SYSCALL_H */ | ||
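A brief usage sketch of the error convention implemented above: syscall_get_error() reports a negative errno left in r0, and anything else reads as success. The function name and the pr_debug() reporting are illustrative only.

    static void example_report_syscall(struct task_struct *task,
    				   struct pt_regs *regs)
    {
    	long err = syscall_get_error(task, regs);	/* -errno, or 0 on success */

    	if (err)
    		pr_debug("syscall %d failed: %ld\n",
    			 syscall_get_nr(task, regs), err);
    	else
    		pr_debug("syscall %d returned %ld\n",
    			 syscall_get_nr(task, regs),
    			 syscall_get_return_value(task, regs));
    }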
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h new file mode 100644 index 00000000000..af165a74537 --- /dev/null +++ b/arch/tile/include/asm/syscalls.h | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * syscalls.h - Linux syscall interfaces (arch-specific) | ||
3 | * | ||
4 | * Copyright (c) 2008 Jaswinder Singh Rajput | ||
5 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation, version 2. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for | ||
15 | * more details. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_SYSCALLS_H | ||
19 | #define _ASM_TILE_SYSCALLS_H | ||
20 | |||
21 | #include <linux/compiler.h> | ||
22 | #include <linux/linkage.h> | ||
23 | #include <linux/signal.h> | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/compat.h> | ||
26 | |||
27 | /* The array of function pointers for syscalls. */ | ||
28 | extern void *sys_call_table[]; | ||
29 | #ifdef CONFIG_COMPAT | ||
30 | extern void *compat_sys_call_table[]; | ||
31 | #endif | ||
32 | |||
33 | /* | ||
34 | * Note that by convention, any syscall which requires the current | ||
35 | * register set takes an additional "struct pt_regs *" pointer; the | ||
36 | * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx(). | ||
37 | */ | ||
38 | |||
39 | /* kernel/sys.c */ | ||
40 | ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count); | ||
41 | long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi, | ||
42 | u32 len, int advice); | ||
43 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, | ||
44 | u32 len_lo, u32 len_hi, int advice); | ||
45 | long sys_flush_cache(void); | ||
46 | long sys_mmap2(unsigned long addr, unsigned long len, | ||
47 | unsigned long prot, unsigned long flags, | ||
48 | unsigned long fd, unsigned long pgoff); | ||
49 | #ifdef __tilegx__ | ||
50 | long sys_mmap(unsigned long addr, unsigned long len, | ||
51 | unsigned long prot, unsigned long flags, | ||
52 | unsigned long fd, off_t pgoff); | ||
53 | #endif | ||
54 | |||
55 | /* kernel/process.c */ | ||
56 | long sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
57 | void __user *parent_tid, void __user *child_tid); | ||
58 | long _sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
59 | void __user *parent_tid, void __user *child_tid, | ||
60 | struct pt_regs *regs); | ||
61 | long sys_fork(void); | ||
62 | long _sys_fork(struct pt_regs *regs); | ||
63 | long sys_vfork(void); | ||
64 | long _sys_vfork(struct pt_regs *regs); | ||
65 | long sys_execve(char __user *filename, char __user * __user *argv, | ||
66 | char __user * __user *envp); | ||
67 | long _sys_execve(char __user *filename, char __user * __user *argv, | ||
68 | char __user * __user *envp, struct pt_regs *regs); | ||
69 | |||
70 | /* kernel/signal.c */ | ||
71 | long sys_sigaltstack(const stack_t __user *, stack_t __user *); | ||
72 | long _sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
73 | struct pt_regs *); | ||
74 | long sys_rt_sigreturn(void); | ||
75 | long _sys_rt_sigreturn(struct pt_regs *regs); | ||
76 | |||
77 | /* platform-independent functions */ | ||
78 | long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize); | ||
79 | long sys_rt_sigaction(int sig, const struct sigaction __user *act, | ||
80 | struct sigaction __user *oact, size_t sigsetsize); | ||
81 | |||
82 | #ifndef __tilegx__ | ||
83 | /* mm/fault.c */ | ||
84 | int sys_cmpxchg_badaddr(unsigned long address); | ||
85 | int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *); | ||
86 | #endif | ||
87 | |||
88 | #ifdef CONFIG_COMPAT | ||
89 | long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
90 | compat_uptr_t __user *envp); | ||
91 | long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
92 | compat_uptr_t __user *envp, struct pt_regs *regs); | ||
93 | long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | ||
94 | struct compat_sigaltstack __user *uoss_ptr); | ||
95 | long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | ||
96 | struct compat_sigaltstack __user *uoss_ptr, | ||
97 | struct pt_regs *regs); | ||
98 | long compat_sys_rt_sigreturn(void); | ||
99 | long _compat_sys_rt_sigreturn(struct pt_regs *regs); | ||
100 | |||
101 | /* These four are not defined for 64-bit, but serve as "compat" syscalls. */ | ||
102 | long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg); | ||
103 | long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); | ||
104 | long sys_truncate64(const char __user *path, loff_t length); | ||
105 | long sys_ftruncate64(unsigned int fd, loff_t length); | ||
106 | #endif | ||
107 | |||
108 | #endif /* _ASM_TILE_SYSCALLS_H */ | ||
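To illustrate the sys_xxx()/_sys_xxx() convention noted at the top of syscalls.h, here is a hedged sketch with hypothetical names; in the real kernel the extra pt_regs pointer is appended by the assembly syscall entry path, so the C-level task_pt_regs(current) below is only a stand-in.

    /* Hypothetical example of the convention; not an actual tile syscall. */
    long _sys_example(unsigned long arg, struct pt_regs *regs)
    {
    	/* ...does the real work, with access to the saved registers... */
    	return 0;
    }

    long sys_example(unsigned long arg)
    {
    	struct pt_regs *regs = task_pt_regs(current);	/* stand-in source */
    	return _sys_example(arg, regs);			/* add pointer, tail-call */
    }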
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h new file mode 100644 index 00000000000..f749be327ce --- /dev/null +++ b/arch/tile/include/asm/system.h | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SYSTEM_H | ||
16 | #define _ASM_TILE_SYSTEM_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/irqflags.h> | ||
22 | |||
23 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | ||
24 | #include <asm/ptrace.h> | ||
25 | |||
26 | #include <arch/chip.h> | ||
27 | #include <arch/sim_def.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | /* | ||
31 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
32 | * depend on. | ||
33 | * | ||
34 | * No data-dependent reads from memory-like regions are ever reordered | ||
35 | * over this barrier. All reads preceding this primitive are guaranteed | ||
36 | * to access memory (but not necessarily other CPUs' caches) before any | ||
37 | * reads following this primitive that depend on the data returned by | ||
38 | * any of the preceding reads. This primitive is much lighter weight than | ||
39 | * rmb() on most CPUs, and is never heavier weight than is | ||
40 | * rmb(). | ||
41 | * | ||
42 | * These ordering constraints are respected by both the local CPU | ||
43 | * and the compiler. | ||
44 | * | ||
45 | * Ordering is not guaranteed by anything other than these primitives, | ||
46 | * not even by data dependencies. See the documentation for | ||
47 | * memory_barrier() for examples and URLs to more information. | ||
48 | * | ||
49 | * For example, the following code would force ordering (the initial | ||
50 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
51 | * | ||
52 | * <programlisting> | ||
53 | * CPU 0 CPU 1 | ||
54 | * | ||
55 | * b = 2; | ||
56 | * memory_barrier(); | ||
57 | * p = &b; q = p; | ||
58 | * read_barrier_depends(); | ||
59 | * d = *q; | ||
60 | * </programlisting> | ||
61 | * | ||
62 | * because the read of "*q" depends on the read of "p" and these | ||
63 | * two reads are separated by a read_barrier_depends(). However, | ||
64 | * the following code, with the same initial values for "a" and "b": | ||
65 | * | ||
66 | * <programlisting> | ||
67 | * CPU 0 CPU 1 | ||
68 | * | ||
69 | * a = 2; | ||
70 | * memory_barrier(); | ||
71 | * b = 3; y = b; | ||
72 | * read_barrier_depends(); | ||
73 | * x = a; | ||
74 | * </programlisting> | ||
75 | * | ||
76 | * does not enforce ordering, since there is no data dependency between | ||
77 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
78 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
79 | * in cases like this where there are no data dependencies. | ||
80 | */ | ||
81 | |||
82 | #define read_barrier_depends() do { } while (0) | ||
83 | |||
84 | #define __sync() __insn_mf() | ||
85 | |||
86 | #if CHIP_HAS_SPLIT_CYCLE() | ||
87 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) | ||
88 | #else | ||
89 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ | ||
90 | #endif | ||
91 | |||
92 | /* Fence to guarantee visibility of stores to incoherent memory. */ | ||
93 | static inline void | ||
94 | mb_incoherent(void) | ||
95 | { | ||
96 | __insn_mf(); | ||
97 | |||
98 | #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() | ||
99 | { | ||
100 | int __mb_incoherent(void); | ||
101 | #if CHIP_HAS_TILE_WRITE_PENDING() | ||
102 | const unsigned long WRITE_TIMEOUT_CYCLES = 400; | ||
103 | unsigned long start = get_cycles_low(); | ||
104 | do { | ||
105 | if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0) | ||
106 | return; | ||
107 | } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES); | ||
108 | #endif /* CHIP_HAS_TILE_WRITE_PENDING() */ | ||
109 | (void) __mb_incoherent(); | ||
110 | } | ||
111 | #endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */ | ||
112 | } | ||
113 | |||
114 | #define fast_wmb() __sync() | ||
115 | #define fast_rmb() __sync() | ||
116 | #define fast_mb() __sync() | ||
117 | #define fast_iob() mb_incoherent() | ||
118 | |||
119 | #define wmb() fast_wmb() | ||
120 | #define rmb() fast_rmb() | ||
121 | #define mb() fast_mb() | ||
122 | #define iob() fast_iob() | ||
123 | |||
124 | #ifdef CONFIG_SMP | ||
125 | #define smp_mb() mb() | ||
126 | #define smp_rmb() rmb() | ||
127 | #define smp_wmb() wmb() | ||
128 | #define smp_read_barrier_depends() read_barrier_depends() | ||
129 | #else | ||
130 | #define smp_mb() barrier() | ||
131 | #define smp_rmb() barrier() | ||
132 | #define smp_wmb() barrier() | ||
133 | #define smp_read_barrier_depends() do { } while (0) | ||
134 | #endif | ||
135 | |||
136 | #define set_mb(var, value) \ | ||
137 | do { var = value; mb(); } while (0) | ||
138 | |||
139 | /* | ||
140 | * Pause the DMA engine and static network before task switching. | ||
141 | */ | ||
142 | #define prepare_arch_switch(next) _prepare_arch_switch(next) | ||
143 | void _prepare_arch_switch(struct task_struct *next); | ||
144 | |||
145 | |||
146 | /* | ||
147 | * switch_to(n) should switch tasks to task nr n, first | ||
148 | * checking that n isn't the current task, in which case it does nothing. | ||
149 | * The number of callee-saved registers saved on the kernel stack | ||
150 | * is defined here for use in copy_thread() and must agree with __switch_to(). | ||
151 | */ | ||
152 | #endif /* !__ASSEMBLY__ */ | ||
153 | #define CALLEE_SAVED_FIRST_REG 30 | ||
154 | #define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */ | ||
155 | #ifndef __ASSEMBLY__ | ||
156 | struct task_struct; | ||
157 | #define switch_to(prev, next, last) ((last) = _switch_to((prev), (next))) | ||
158 | extern struct task_struct *_switch_to(struct task_struct *prev, | ||
159 | struct task_struct *next); | ||
160 | |||
161 | /* Helper function for _switch_to(). */ | ||
162 | extern struct task_struct *__switch_to(struct task_struct *prev, | ||
163 | struct task_struct *next, | ||
164 | unsigned long new_system_save_1_0); | ||
165 | |||
166 | /* Address that switched-away from tasks are at. */ | ||
167 | extern unsigned long get_switch_to_pc(void); | ||
168 | |||
169 | /* | ||
170 | * On SMP systems, when the scheduler does migration-cost autodetection, | ||
171 | * it needs a way to flush as much of the CPU's caches as possible: | ||
172 | * | ||
173 | * TODO: fill this in! | ||
174 | */ | ||
175 | static inline void sched_cacheflush(void) | ||
176 | { | ||
177 | } | ||
178 | |||
179 | #define arch_align_stack(x) (x) | ||
180 | |||
181 | /* | ||
182 | * Is the kernel doing fixups of unaligned accesses? If <0, no kernel | ||
183 | * intervention occurs and SIGBUS is delivered with no data address | ||
184 | * info. If 0, the kernel single-steps the instruction to discover | ||
185 | * the data address to provide with the SIGBUS. If 1, the kernel does | ||
186 | * a fixup. | ||
187 | */ | ||
188 | extern int unaligned_fixup; | ||
189 | |||
190 | /* Is the kernel printing on each unaligned fixup? */ | ||
191 | extern int unaligned_printk; | ||
192 | |||
193 | /* Number of unaligned fixups performed */ | ||
194 | extern unsigned int unaligned_fixup_count; | ||
195 | |||
196 | /* Init-time routine to do tile-specific per-cpu setup. */ | ||
197 | void setup_cpu(int boot); | ||
198 | |||
199 | /* User-level DMA management functions */ | ||
200 | void grant_dma_mpls(void); | ||
201 | void restrict_dma_mpls(void); | ||
202 | |||
203 | #ifdef CONFIG_HARDWALL | ||
204 | /* User-level network management functions */ | ||
205 | void reset_network_state(void); | ||
206 | void grant_network_mpls(void); | ||
207 | void restrict_network_mpls(void); | ||
208 | int hardwall_deactivate(struct task_struct *task); | ||
209 | |||
210 | /* Hook hardwall code into changes in affinity. */ | ||
211 | #define arch_set_cpus_allowed(p, new_mask) do { \ | ||
212 | if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \ | ||
213 | hardwall_deactivate(p); \ | ||
214 | } while (0) | ||
215 | #endif | ||
216 | |||
217 | /* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */ | ||
218 | extern int _sim_syscall(int syscall_num, ...); | ||
219 | #define sim_syscall(syscall_num, ...) \ | ||
220 | _sim_syscall(SIM_CONTROL_SYSCALL + \ | ||
221 | ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \ | ||
222 | ## __VA_ARGS__) | ||
223 | |||
224 | /* | ||
225 | * Kernel threads can check to see if they need to migrate their | ||
226 | * stack whenever they return from a context switch; for user | ||
227 | * threads, we defer until they are returning to user-space. | ||
228 | */ | ||
229 | #define finish_arch_switch(prev) do { \ | ||
230 | if (unlikely((prev)->state == TASK_DEAD)) \ | ||
231 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ | ||
232 | ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ | ||
233 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ | ||
234 | (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ | ||
235 | if (current->mm == NULL && !kstack_hash && \ | ||
236 | current_thread_info()->homecache_cpu != smp_processor_id()) \ | ||
237 | homecache_migrate_kthread(); \ | ||
238 | } while (0) | ||
239 | |||
240 | /* Support function for forking a new task. */ | ||
241 | void ret_from_fork(void); | ||
242 | |||
243 | /* Called from ret_from_fork() when a new process starts up. */ | ||
244 | struct task_struct *sim_notify_fork(struct task_struct *prev); | ||
245 | |||
246 | #endif /* !__ASSEMBLY__ */ | ||
247 | |||
248 | #endif /* _ASM_TILE_SYSTEM_H */ | ||
diff --git a/arch/tile/include/asm/termbits.h b/arch/tile/include/asm/termbits.h new file mode 100644 index 00000000000..3935b106de7 --- /dev/null +++ b/arch/tile/include/asm/termbits.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termbits.h> | |||
diff --git a/arch/tile/include/asm/termios.h b/arch/tile/include/asm/termios.h new file mode 100644 index 00000000000..280d78a9d96 --- /dev/null +++ b/arch/tile/include/asm/termios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termios.h> | |||
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h new file mode 100644 index 00000000000..3872f2b345d --- /dev/null +++ b/arch/tile/include/asm/thread_info.h | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_THREAD_INFO_H | ||
17 | #define _ASM_TILE_THREAD_INFO_H | ||
18 | |||
19 | #include <asm/processor.h> | ||
20 | #include <asm/page.h> | ||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | /* | ||
24 | * Low level task data that assembly code needs immediate access to. | ||
25 | * The structure is placed at the bottom of the supervisor stack. | ||
26 | */ | ||
27 | struct thread_info { | ||
28 | struct task_struct *task; /* main task structure */ | ||
29 | struct exec_domain *exec_domain; /* execution domain */ | ||
30 | unsigned long flags; /* low level flags */ | ||
31 | unsigned long status; /* thread-synchronous flags */ | ||
32 | __u32 homecache_cpu; /* CPU we are homecached on */ | ||
33 | __u32 cpu; /* current CPU */ | ||
34 | int preempt_count; /* 0 => preemptable, | ||
35 | <0 => BUG */ | ||
36 | |||
37 | mm_segment_t addr_limit; /* thread address space | ||
38 | (KERNEL_DS or USER_DS) */ | ||
39 | struct restart_block restart_block; | ||
40 | struct single_step_state *step_state; /* single step state | ||
41 | (if non-zero) */ | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * macros/functions for gaining access to the thread information structure. | ||
46 | */ | ||
47 | #define INIT_THREAD_INFO(tsk) \ | ||
48 | { \ | ||
49 | .task = &tsk, \ | ||
50 | .exec_domain = &default_exec_domain, \ | ||
51 | .flags = 0, \ | ||
52 | .cpu = 0, \ | ||
53 | .preempt_count = INIT_PREEMPT_COUNT, \ | ||
54 | .addr_limit = KERNEL_DS, \ | ||
55 | .restart_block = { \ | ||
56 | .fn = do_no_restart_syscall, \ | ||
57 | }, \ | ||
58 | .step_state = NULL, \ | ||
59 | } | ||
60 | |||
61 | #define init_thread_info (init_thread_union.thread_info) | ||
62 | #define init_stack (init_thread_union.stack) | ||
63 | |||
64 | #endif /* !__ASSEMBLY__ */ | ||
65 | |||
66 | #if PAGE_SIZE < 8192 | ||
67 | #define THREAD_SIZE_ORDER (13 - PAGE_SHIFT) | ||
68 | #else | ||
69 | #define THREAD_SIZE_ORDER (0) | ||
70 | #endif | ||
71 | |||
72 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | ||
73 | #define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER) | ||
74 | |||
75 | #define STACK_WARN (THREAD_SIZE/8) | ||
76 | |||
77 | #ifndef __ASSEMBLY__ | ||
78 | |||
79 | /* How to get the thread information struct from C. */ | ||
80 | register unsigned long stack_pointer __asm__("sp"); | ||
81 | |||
82 | #define current_thread_info() \ | ||
83 | ((struct thread_info *)(stack_pointer & -THREAD_SIZE)) | ||
84 | |||
85 | #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR | ||
86 | extern struct thread_info *alloc_thread_info(struct task_struct *task); | ||
87 | extern void free_thread_info(struct thread_info *info); | ||
88 | |||
89 | /* Sit on a nap instruction until interrupted. */ | ||
90 | extern void smp_nap(void); | ||
91 | |||
92 | /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ | ||
93 | extern void _cpu_idle(void); | ||
94 | |||
95 | /* Switch boot idle thread to a freshly-allocated stack and free old stack. */ | ||
96 | extern void cpu_idle_on_new_stack(struct thread_info *old_ti, | ||
97 | unsigned long new_sp, | ||
98 | unsigned long new_ss10); | ||
99 | |||
100 | #else /* __ASSEMBLY__ */ | ||
101 | |||
102 | /* how to get the thread information struct from ASM */ | ||
103 | #ifdef __tilegx__ | ||
104 | #define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63 | ||
105 | #else | ||
106 | #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31 | ||
107 | #endif | ||
108 | |||
109 | #endif /* !__ASSEMBLY__ */ | ||
110 | |||
111 | #define PREEMPT_ACTIVE 0x10000000 | ||
112 | |||
113 | /* | ||
114 | * Thread information flags that various assembly files may need to access. | ||
115 | * Keep flags accessed frequently in low bits, particularly since it makes | ||
116 | * it easier to build constants in assembly. | ||
117 | */ | ||
118 | #define TIF_SIGPENDING 0 /* signal pending */ | ||
119 | #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ | ||
120 | #define TIF_SINGLESTEP 2 /* restore singlestep on return to | ||
121 | user mode */ | ||
122 | #define TIF_ASYNC_TLB 3 /* got an async TLB fault in kernel */ | ||
123 | #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ | ||
124 | #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ | ||
125 | #define TIF_SECCOMP 6 /* secure computing */ | ||
126 | #define TIF_MEMDIE 7 /* OOM killer at work */ | ||
127 | |||
128 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | ||
129 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | ||
130 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | ||
131 | #define _TIF_ASYNC_TLB (1<<TIF_ASYNC_TLB) | ||
132 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | ||
133 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | ||
134 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | ||
135 | #define _TIF_MEMDIE (1<<TIF_MEMDIE) | ||
136 | |||
137 | /* Work to do on any return to user space. */ | ||
138 | #define _TIF_ALLWORK_MASK \ | ||
139 | (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB) | ||
140 | |||
141 | /* | ||
142 | * Thread-synchronous status. | ||
143 | * | ||
144 | * This is different from the flags in that nobody else | ||
145 | * ever touches our thread-synchronous status, so we don't | ||
146 | * have to worry about atomic accesses. | ||
147 | */ | ||
148 | #ifdef __tilegx__ | ||
149 | #define TS_COMPAT 0x0001 /* 32-bit compatibility mode */ | ||
150 | #endif | ||
151 | #define TS_POLLING 0x0004 /* in idle loop but not sleeping */ | ||
152 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ | ||
153 | |||
154 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | ||
155 | |||
156 | #ifndef __ASSEMBLY__ | ||
157 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
158 | static inline void set_restore_sigmask(void) | ||
159 | { | ||
160 | struct thread_info *ti = current_thread_info(); | ||
161 | ti->status |= TS_RESTORE_SIGMASK; | ||
162 | set_bit(TIF_SIGPENDING, &ti->flags); | ||
163 | } | ||
164 | #endif /* !__ASSEMBLY__ */ | ||
165 | |||
166 | #endif /* _ASM_TILE_THREAD_INFO_H */ | ||
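As a worked example of the stack-pointer masking behind current_thread_info() above: assuming an 8 KB THREAD_SIZE, any stack pointer inside the thread's stack rounds down to the base of the combined thread_info/stack block.

    /* Self-contained userspace illustration; the addresses are made up. */
    #include <assert.h>

    int main(void)
    {
    	unsigned long thread_size = 0x2000;		/* 8 KB, example value */
    	unsigned long sp = 0xc0012f40UL;		/* hypothetical kernel sp */

    	assert((sp & -thread_size) == 0xc0012000UL);	/* thread_info base */
    	return 0;
    }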
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h new file mode 100644 index 00000000000..3baf5fc4c0a --- /dev/null +++ b/arch/tile/include/asm/timex.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TIMEX_H | ||
16 | #define _ASM_TILE_TIMEX_H | ||
17 | |||
18 | /* | ||
19 | * This rate should be a multiple of the possible HZ values (100, 250, 1000) | ||
20 | * and a fraction of the possible hardware timer frequencies. Our timer | ||
21 | * frequency is highly tunable but also quite precise, so for the primary use | ||
22 | * of this value (setting ACT_HZ from HZ) we just pick a value that causes | ||
23 | * ACT_HZ to be set to HZ. We make the value somewhat large just to be | ||
24 | * more robust in case someone tries out a new value of HZ. | ||
25 | */ | ||
26 | #define CLOCK_TICK_RATE 1000000 | ||
27 | |||
28 | typedef unsigned long long cycles_t; | ||
29 | |||
30 | #if CHIP_HAS_SPLIT_CYCLE() | ||
31 | cycles_t get_cycles(void); | ||
32 | #else | ||
33 | static inline cycles_t get_cycles(void) | ||
34 | { | ||
35 | return __insn_mfspr(SPR_CYCLE); | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | cycles_t get_clock_rate(void); | ||
40 | |||
41 | /* Called at cpu initialization to set some low-level constants. */ | ||
42 | void setup_clock(void); | ||
43 | |||
44 | /* Called at cpu initialization to start the tile-timer clock device. */ | ||
45 | void setup_tile_timer(void); | ||
46 | |||
47 | #endif /* _ASM_TILE_TIMEX_H */ | ||
diff --git a/arch/tile/include/asm/tlb.h b/arch/tile/include/asm/tlb.h new file mode 100644 index 00000000000..4a891a1a8df --- /dev/null +++ b/arch/tile/include/asm/tlb.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TLB_H | ||
16 | #define _ASM_TILE_TLB_H | ||
17 | |||
18 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
19 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
20 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | ||
21 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | ||
22 | |||
23 | #include <asm-generic/tlb.h> | ||
24 | |||
25 | #endif /* _ASM_TILE_TLB_H */ | ||
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h new file mode 100644 index 00000000000..96199d214fb --- /dev/null +++ b/arch/tile/include/asm/tlbflush.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TLBFLUSH_H | ||
16 | #define _ASM_TILE_TLBFLUSH_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <asm/cacheflush.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | /* | ||
26 | * Rather than associating each mm with its own ASID, we just use | ||
27 | * ASIDs to allow us to lazily flush the TLB when we switch mms. | ||
28 | * This way we only have to do an actual TLB flush on mm switch | ||
29 | * every time we wrap ASIDs, not every single time we switch. | ||
30 | * | ||
31 | * FIXME: We might improve performance by keeping ASIDs around | ||
32 | * properly, though since the hypervisor direct-maps VAs to TSB | ||
33 | * entries, we're likely to have lost at least the executable page | ||
34 | * mappings by the time we switch back to the original mm. | ||
35 | */ | ||
36 | DECLARE_PER_CPU(int, current_asid); | ||
37 | |||
38 | /* The hypervisor tells us what ASIDs are available to us. */ | ||
39 | extern int min_asid, max_asid; | ||
40 | |||
41 | static inline unsigned long hv_page_size(const struct vm_area_struct *vma) | ||
42 | { | ||
43 | return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE; | ||
44 | } | ||
45 | |||
46 | /* Pass as vma pointer for non-executable mapping, if no vma available. */ | ||
47 | #define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL) | ||
48 | |||
49 | /* Flush a single user page on this cpu. */ | ||
50 | static inline void local_flush_tlb_page(const struct vm_area_struct *vma, | ||
51 | unsigned long addr, | ||
52 | unsigned long page_size) | ||
53 | { | ||
54 | int rc = hv_flush_page(addr, page_size); | ||
55 | if (rc < 0) | ||
56 | panic("hv_flush_page(%#lx,%#lx) failed: %d", | ||
57 | addr, page_size, rc); | ||
58 | if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) | ||
59 | __flush_icache(); | ||
60 | } | ||
61 | |||
62 | /* Flush range of user pages on this cpu. */ | ||
63 | static inline void local_flush_tlb_pages(const struct vm_area_struct *vma, | ||
64 | unsigned long addr, | ||
65 | unsigned long page_size, | ||
66 | unsigned long len) | ||
67 | { | ||
68 | int rc = hv_flush_pages(addr, page_size, len); | ||
69 | if (rc < 0) | ||
70 | panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d", | ||
71 | addr, page_size, len, rc); | ||
72 | if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) | ||
73 | __flush_icache(); | ||
74 | } | ||
75 | |||
76 | /* Flush all user pages on this cpu. */ | ||
77 | static inline void local_flush_tlb(void) | ||
78 | { | ||
79 | int rc = hv_flush_all(1); /* preserve global mappings */ | ||
80 | if (rc < 0) | ||
81 | panic("hv_flush_all(1) failed: %d", rc); | ||
82 | __flush_icache(); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Global pages have to be flushed a bit differently. Not a real | ||
87 | * performance problem because this does not happen often. | ||
88 | */ | ||
89 | static inline void local_flush_tlb_all(void) | ||
90 | { | ||
91 | int i; | ||
92 | for (i = 0; ; ++i) { | ||
93 | HV_VirtAddrRange r = hv_inquire_virtual(i); | ||
94 | if (r.size == 0) | ||
95 | break; | ||
96 | local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size); | ||
97 | local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * TLB flushing: | ||
103 | * | ||
104 | * - flush_tlb() flushes the current mm struct TLBs | ||
105 | * - flush_tlb_all() flushes all processes TLBs | ||
106 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
107 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
108 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
109 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
110 | * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus | ||
111 | * | ||
112 | * Here (as in vm_area_struct), "end" means the first byte after | ||
113 | * our end address. | ||
114 | */ | ||
115 | |||
116 | extern void flush_tlb_all(void); | ||
117 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
118 | extern void flush_tlb_current_task(void); | ||
119 | extern void flush_tlb_mm(struct mm_struct *); | ||
120 | extern void flush_tlb_page(const struct vm_area_struct *, unsigned long); | ||
121 | extern void flush_tlb_page_mm(const struct vm_area_struct *, | ||
122 | struct mm_struct *, unsigned long); | ||
123 | extern void flush_tlb_range(const struct vm_area_struct *, | ||
124 | unsigned long start, unsigned long end); | ||
125 | |||
126 | #define flush_tlb() flush_tlb_current_task() | ||
127 | |||
128 | #endif /* _ASM_TILE_TLBFLUSH_H */ | ||
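The comment at the top of tlbflush.h describes flushing lazily, only when the ASID space wraps. Here is a hedged sketch of that idea using the current_asid, min_asid and max_asid variables declared above; the function name is hypothetical and the real logic lives in the arch/tile mm code.

    static void example_pick_new_asid(void)
    {
    	int asid = __get_cpu_var(current_asid) + 1;

    	if (asid > max_asid) {			/* wrapped: flush and start over */
    		asid = min_asid;
    		local_flush_tlb();
    	}
    	__get_cpu_var(current_asid) = asid;
    	/* ...the incoming mm's context would then be tagged with 'asid'... */
    }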
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h new file mode 100644 index 00000000000..343172d422a --- /dev/null +++ b/arch/tile/include/asm/topology.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TOPOLOGY_H | ||
16 | #define _ASM_TILE_TOPOLOGY_H | ||
17 | |||
18 | #ifdef CONFIG_NUMA | ||
19 | |||
20 | #include <linux/cpumask.h> | ||
21 | |||
22 | /* Mappings between logical cpu number and node number. */ | ||
23 | extern struct cpumask node_2_cpu_mask[]; | ||
24 | extern char cpu_2_node[]; | ||
25 | |||
26 | /* Returns the number of the node containing CPU 'cpu'. */ | ||
27 | static inline int cpu_to_node(int cpu) | ||
28 | { | ||
29 | return cpu_2_node[cpu]; | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * Returns the number of the node containing Node 'node'. | ||
34 | * This architecture is flat, so it is a pretty simple function! | ||
35 | */ | ||
36 | #define parent_node(node) (node) | ||
37 | |||
38 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
39 | static inline const struct cpumask *cpumask_of_node(int node) | ||
40 | { | ||
41 | return &node_2_cpu_mask[node]; | ||
42 | } | ||
43 | |||
44 | /* For now, use numa node -1 for global allocation. */ | ||
45 | #define pcibus_to_node(bus) ((void)(bus), -1) | ||
46 | |||
47 | /* sched_domains SD_NODE_INIT for TILE architecture */ | ||
48 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
49 | .min_interval = 8, \ | ||
50 | .max_interval = 32, \ | ||
51 | .busy_factor = 32, \ | ||
52 | .imbalance_pct = 125, \ | ||
53 | .cache_nice_tries = 1, \ | ||
54 | .busy_idx = 3, \ | ||
55 | .idle_idx = 1, \ | ||
56 | .newidle_idx = 2, \ | ||
57 | .wake_idx = 1, \ | ||
58 | .flags = SD_LOAD_BALANCE \ | ||
59 | | SD_BALANCE_NEWIDLE \ | ||
60 | | SD_BALANCE_EXEC \ | ||
61 | | SD_BALANCE_FORK \ | ||
62 | | SD_WAKE_AFFINE \ | ||
63 | | SD_SERIALIZE, \ | ||
64 | .last_balance = jiffies, \ | ||
65 | .balance_interval = 1, \ | ||
66 | } | ||
67 | |||
68 | /* By definition, we create nodes based on online memory. */ | ||
69 | #define node_has_online_mem(nid) 1 | ||
70 | |||
71 | #endif /* CONFIG_NUMA */ | ||
72 | |||
73 | #include <asm-generic/topology.h> | ||
74 | |||
75 | #ifdef CONFIG_SMP | ||
76 | #define topology_physical_package_id(cpu) ((void)(cpu), 0) | ||
77 | #define topology_core_id(cpu) (cpu) | ||
78 | #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) | ||
79 | #define topology_thread_cpumask(cpu) cpumask_of(cpu) | ||
80 | |||
81 | /* indicates that pointers to the topology struct cpumask maps are valid */ | ||
82 | #define arch_provides_topology_pointers yes | ||
83 | #endif | ||
84 | |||
85 | #endif /* _ASM_TILE_TOPOLOGY_H */ | ||
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h new file mode 100644 index 00000000000..432a9c15c8a --- /dev/null +++ b/arch/tile/include/asm/traps.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TRAPS_H | ||
16 | #define _ASM_TILE_TRAPS_H | ||
17 | |||
18 | /* mm/fault.c */ | ||
19 | void do_page_fault(struct pt_regs *, int fault_num, | ||
20 | unsigned long address, unsigned long write); | ||
21 | void do_async_page_fault(struct pt_regs *); | ||
22 | |||
23 | #ifndef __tilegx__ | ||
24 | /* | ||
25 | * We return this structure in registers to avoid having to write | ||
26 | * additional save/restore code in the intvec.S caller. | ||
27 | */ | ||
28 | struct intvec_state { | ||
29 | void *handler; | ||
30 | unsigned long vecnum; | ||
31 | unsigned long fault_num; | ||
32 | unsigned long info; | ||
33 | unsigned long retval; | ||
34 | }; | ||
35 | struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, | ||
36 | unsigned long address, | ||
37 | unsigned long info); | ||
38 | #endif | ||
39 | |||
40 | /* kernel/traps.c */ | ||
41 | void do_trap(struct pt_regs *, int fault_num, unsigned long reason); | ||
42 | void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52); | ||
43 | |||
44 | /* kernel/time.c */ | ||
45 | void do_timer_interrupt(struct pt_regs *, int fault_num); | ||
46 | |||
47 | /* kernel/messaging.c */ | ||
48 | void hv_message_intr(struct pt_regs *, int intnum); | ||
49 | |||
50 | /* kernel/irq.c */ | ||
51 | void tile_dev_intr(struct pt_regs *, int intnum); | ||
52 | |||
53 | #ifdef CONFIG_HARDWALL | ||
54 | /* kernel/hardwall.c */ | ||
55 | void do_hardwall_trap(struct pt_regs *, int fault_num); | ||
56 | #endif | ||
57 | |||
58 | /* kernel/ptrace.c */ | ||
59 | void do_breakpoint(struct pt_regs *, int fault_num); | ||
60 | |||
61 | |||
62 | #endif /* _ASM_TILE_TRAPS_H */ | ||
diff --git a/arch/tile/include/asm/types.h b/arch/tile/include/asm/types.h new file mode 100644 index 00000000000..b9e79bc580d --- /dev/null +++ b/arch/tile/include/asm/types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/types.h> | |||
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h new file mode 100644 index 00000000000..ed17a80ec0e --- /dev/null +++ b/arch/tile/include/asm/uaccess.h | |||
@@ -0,0 +1,580 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UACCESS_H | ||
16 | #define _ASM_TILE_UACCESS_H | ||
17 | |||
18 | /* | ||
19 | * User space memory access functions | ||
20 | */ | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <asm-generic/uaccess-unaligned.h> | ||
24 | #include <asm/processor.h> | ||
25 | #include <asm/page.h> | ||
26 | |||
27 | #define VERIFY_READ 0 | ||
28 | #define VERIFY_WRITE 1 | ||
29 | |||
30 | /* | ||
31 | * The fs value determines whether argument validity checking should be | ||
32 | * performed or not. If get_fs() == USER_DS, checking is performed, with | ||
33 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
34 | * | ||
35 | * For historical reasons, these macros are grossly misnamed. | ||
36 | */ | ||
37 | #define MAKE_MM_SEG(a) ((mm_segment_t) { (a) }) | ||
38 | |||
39 | #define KERNEL_DS MAKE_MM_SEG(-1UL) | ||
40 | #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) | ||
41 | |||
42 | #define get_ds() (KERNEL_DS) | ||
43 | #define get_fs() (current_thread_info()->addr_limit) | ||
44 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
45 | |||
46 | #define segment_eq(a, b) ((a).seg == (b).seg) | ||
47 | |||
48 | #ifndef __tilegx__ | ||
49 | /* | ||
50 | * We could allow mapping all 16 MB at 0xfc000000, but we set up a | ||
51 | * special hack in arch_setup_additional_pages() to auto-create a mapping | ||
52 | * for the first 16 KB, and it would seem strange to have different | ||
53 | * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000. | ||
54 | */ | ||
55 | static inline int is_arch_mappable_range(unsigned long addr, | ||
56 | unsigned long size) | ||
57 | { | ||
58 | return (addr >= MEM_USER_INTRPT && | ||
59 | addr < (MEM_USER_INTRPT + INTRPT_SIZE) && | ||
60 | size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr); | ||
61 | } | ||
62 | #define is_arch_mappable_range is_arch_mappable_range | ||
63 | #else | ||
64 | #define is_arch_mappable_range(addr, size) 0 | ||
65 | #endif | ||
66 | |||
67 | /* | ||
68 | * Test whether a block of memory is a valid user space address. | ||
69 | * Returns 0 if the range is valid, nonzero otherwise. | ||
70 | */ | ||
71 | int __range_ok(unsigned long addr, unsigned long size); | ||
72 | |||
73 | /** | ||
74 | * access_ok: - Checks if a user space pointer is valid | ||
75 | * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that | ||
76 | * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe | ||
77 | * to write to a block, it is always safe to read from it. | ||
78 | * @addr: User space pointer to start of block to check | ||
79 | * @size: Size of block to check | ||
80 | * | ||
81 | * Context: User context only. This function may sleep. | ||
82 | * | ||
83 | * Checks if a pointer to a block of memory in user space is valid. | ||
84 | * | ||
85 | * Returns true (nonzero) if the memory block may be valid, false (zero) | ||
86 | * if it is definitely invalid. | ||
87 | * | ||
88 | * Note that, depending on architecture, this function probably just | ||
89 | * checks that the pointer is in the user space range - after calling | ||
90 | * this function, memory access functions may still return -EFAULT. | ||
91 | */ | ||
92 | #define access_ok(type, addr, size) ({ \ | ||
93 | __chk_user_ptr(addr); \ | ||
94 | likely(__range_ok((unsigned long)(addr), (size)) == 0); \ | ||
95 | }) | ||
96 | |||
97 | /* | ||
98 | * The exception table consists of pairs of addresses: the first is the | ||
99 | * address of an instruction that is allowed to fault, and the second is | ||
100 | * the address at which the program should continue. No registers are | ||
101 | * modified, so it is entirely up to the continuation code to figure out | ||
102 | * what to do. | ||
103 | * | ||
104 | * All the routines below use bits of fixup code that are out of line | ||
105 | * with the main instruction path. This means when everything is well, | ||
106 | * we don't even have to jump over them. Further, they do not intrude | ||
107 | * on our cache or tlb entries. | ||
108 | */ | ||
109 | |||
110 | struct exception_table_entry { | ||
111 | unsigned long insn, fixup; | ||
112 | }; | ||
113 | |||
114 | extern int fixup_exception(struct pt_regs *regs); | ||
115 | |||
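    /*
     * Hedged sketch (not the actual tile implementation, and the pc field
     * name is assumed) of how a fault handler could consult the table
     * described above: look up the faulting pc via the generic
     * search_exception_tables() helper and, if a fixup is registered,
     * resume there instead of treating the fault as fatal.
     */
    static inline int example_fixup_exception(struct pt_regs *regs)
    {
    	const struct exception_table_entry *fixup =
    		search_exception_tables(regs->pc);

    	if (!fixup)
    		return 0;		/* no fixup: fault stays fatal */
    	regs->pc = fixup->fixup;	/* continue at the fixup address */
    	return 1;
    }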
116 | /* | ||
117 | * We return the __get_user_N function results in a structure, | ||
118 | * thus in r0 and r1. If "err" is zero, "val" is the result | ||
119 | * of the read; otherwise, "err" is -EFAULT. | ||
120 | * | ||
121 | * We rarely need 8-byte values on a 32-bit architecture, but | ||
122 | * we size the structure to accommodate. In practice, for the | ||
123 | * smaller reads, we can zero the high word for free, and | ||
124 | * the caller will ignore it by virtue of casting anyway. | ||
125 | */ | ||
126 | struct __get_user { | ||
127 | unsigned long long val; | ||
128 | int err; | ||
129 | }; | ||
130 | |||
131 | /* | ||
132 | * FIXME: we should express these as inline extended assembler, since | ||
133 | * they're fundamentally just a variable dereference and some | ||
134 | * supporting exception_table gunk. Note that (a la i386) we can | ||
135 | * extend the copy_to_user and copy_from_user routines to call into | ||
136 | * such extended assembler routines, though we will have to use a | ||
137 | * different return code in that case (1, 2, or 4, rather than -EFAULT). | ||
138 | */ | ||
139 | extern struct __get_user __get_user_1(const void __user *); | ||
140 | extern struct __get_user __get_user_2(const void __user *); | ||
141 | extern struct __get_user __get_user_4(const void __user *); | ||
142 | extern struct __get_user __get_user_8(const void __user *); | ||
143 | extern int __put_user_1(long, void __user *); | ||
144 | extern int __put_user_2(long, void __user *); | ||
145 | extern int __put_user_4(long, void __user *); | ||
146 | extern int __put_user_8(long long, void __user *); | ||
147 | |||
148 | /* Unimplemented routines to cause linker failures */ | ||
149 | extern struct __get_user __get_user_bad(void); | ||
150 | extern int __put_user_bad(void); | ||
151 | |||
152 | /* | ||
153 | * Careful: we have to cast the result to the type of the pointer | ||
154 | * so that the value is sign-extended correctly for the pointed-to type. | ||
155 | */ | ||
156 | /** | ||
157 | * __get_user: - Get a simple variable from user space, with less checking. | ||
158 | * @x: Variable to store result. | ||
159 | * @ptr: Source address, in user space. | ||
160 | * | ||
161 | * Context: User context only. This function may sleep. | ||
162 | * | ||
163 | * This macro copies a single simple variable from user space to kernel | ||
164 | * space. It supports simple types like char and int, but not larger | ||
165 | * data types like structures or arrays. | ||
166 | * | ||
167 | * @ptr must have pointer-to-simple-variable type, and the result of | ||
168 | * dereferencing @ptr must be assignable to @x without a cast. | ||
169 | * | ||
170 | * Returns zero on success, or -EFAULT on error. | ||
171 | * On error, the variable @x is set to zero. | ||
172 | * | ||
173 | * Caller must check the pointer with access_ok() before calling this | ||
174 | * function. | ||
175 | */ | ||
176 | #define __get_user(x, ptr) \ | ||
177 | ({ struct __get_user __ret; \ | ||
178 | __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \ | ||
179 | __chk_user_ptr(__gu_addr); \ | ||
180 | switch (sizeof(*(__gu_addr))) { \ | ||
181 | case 1: \ | ||
182 | __ret = __get_user_1(__gu_addr); \ | ||
183 | break; \ | ||
184 | case 2: \ | ||
185 | __ret = __get_user_2(__gu_addr); \ | ||
186 | break; \ | ||
187 | case 4: \ | ||
188 | __ret = __get_user_4(__gu_addr); \ | ||
189 | break; \ | ||
190 | case 8: \ | ||
191 | __ret = __get_user_8(__gu_addr); \ | ||
192 | break; \ | ||
193 | default: \ | ||
194 | __ret = __get_user_bad(); \ | ||
195 | break; \ | ||
196 | } \ | ||
197 | (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \ | ||
198 | __ret.val; \ | ||
199 | __ret.err; \ | ||
200 | }) | ||
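A hedged usage sketch (the function name read_user_flag is invented): the caller performs the access_ok() check that __get_user() itself omits.

/* Sketch: fetch one int from an already range-checked user pointer. */
static inline int read_user_flag(const int __user *p, int *out)
{
	if (!access_ok(VERIFY_READ, p, sizeof(*p)))
		return -EFAULT;
	return __get_user(*out, p);	/* 0 on success, -EFAULT on fault */
}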
201 | |||
202 | /** | ||
203 | * __put_user: - Write a simple value into user space, with less checking. | ||
204 | * @x: Value to copy to user space. | ||
205 | * @ptr: Destination address, in user space. | ||
206 | * | ||
207 | * Context: User context only. This function may sleep. | ||
208 | * | ||
209 | * This macro copies a single simple value from kernel space to user | ||
210 | * space. It supports simple types like char and int, but not larger | ||
211 | * data types like structures or arrays. | ||
212 | * | ||
213 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | ||
214 | * to the result of dereferencing @ptr. | ||
215 | * | ||
216 | * Caller must check the pointer with access_ok() before calling this | ||
217 | * function. | ||
218 | * | ||
219 | * Returns zero on success, or -EFAULT on error. | ||
220 | * | ||
221 | * Implementation note: The "case 8" logic of casting to the type of | ||
222 | * the result of subtracting the value from itself is basically a way | ||
223 | * of keeping all integer types the same, but casting any pointers to | ||
224 | * ptrdiff_t, i.e. also an integer type. This way there are no | ||
225 | * questionable casts seen by the compiler on an ILP32 platform. | ||
226 | */ | ||
227 | #define __put_user(x, ptr) \ | ||
228 | ({ \ | ||
229 | int __pu_err = 0; \ | ||
230 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
231 | typeof(*__pu_addr) __pu_val = (x); \ | ||
232 | __chk_user_ptr(__pu_addr); \ | ||
233 | switch (sizeof(__pu_val)) { \ | ||
234 | case 1: \ | ||
235 | __pu_err = __put_user_1((long)__pu_val, __pu_addr); \ | ||
236 | break; \ | ||
237 | case 2: \ | ||
238 | __pu_err = __put_user_2((long)__pu_val, __pu_addr); \ | ||
239 | break; \ | ||
240 | case 4: \ | ||
241 | __pu_err = __put_user_4((long)__pu_val, __pu_addr); \ | ||
242 | break; \ | ||
243 | case 8: \ | ||
244 | __pu_err = \ | ||
245 | __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\ | ||
246 | __pu_addr); \ | ||
247 | break; \ | ||
248 | default: \ | ||
249 | __pu_err = __put_user_bad(); \ | ||
250 | break; \ | ||
251 | } \ | ||
252 | __pu_err; \ | ||
253 | }) | ||
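To make the implementation note above concrete, a small worked illustration (not part of the header): the self-subtraction leaves integer types unchanged while degrading pointer values to ptrdiff_t.

/*
 * For an integer value the cast is an identity; for a pointer it
 * becomes a plain integer cast with no questionable conversion:
 *
 *   int v;           (__typeof__(v - v))v  is  (int)v
 *   char __user *p;  (__typeof__(p - p))p  is  (ptrdiff_t)p
 */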
254 | |||
255 | /* | ||
256 | * The versions of get_user and put_user without initial underscores | ||
257 | * check the address of their arguments to make sure they are not | ||
258 | * in kernel space. | ||
259 | */ | ||
260 | #define put_user(x, ptr) \ | ||
261 | ({ \ | ||
262 | __typeof__(*(ptr)) __user *__Pu_addr = (ptr); \ | ||
263 | access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ? \ | ||
264 | __put_user((x), (__Pu_addr)) : \ | ||
265 | -EFAULT; \ | ||
266 | }) | ||
267 | |||
268 | #define get_user(x, ptr) \ | ||
269 | ({ \ | ||
270 | __typeof__(*(ptr)) const __user *__Gu_addr = (ptr); \ | ||
271 | access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ? \ | ||
272 | __get_user((x), (__Gu_addr)) : \ | ||
273 | ((x) = 0, -EFAULT); \ | ||
274 | }) | ||
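A hedged example of the checked forms in combination (bump_user_counter is an invented name); these variants fold the access_ok() test into the call itself.

/* Sketch: read, modify and write back a single user-space int. */
static inline int bump_user_counter(int __user *p)
{
	int v;

	if (get_user(v, p))		/* -EFAULT on a bad address */
		return -EFAULT;
	return put_user(v + 1, p);
}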
275 | |||
276 | /** | ||
277 | * __copy_to_user() - copy data into user space, with less checking. | ||
278 | * @to: Destination address, in user space. | ||
279 | * @from: Source address, in kernel space. | ||
280 | * @n: Number of bytes to copy. | ||
281 | * | ||
282 | * Context: User context only. This function may sleep. | ||
283 | * | ||
284 | * Copy data from kernel space to user space. Caller must check | ||
285 | * the specified block with access_ok() before calling this function. | ||
286 | * | ||
287 | * Returns number of bytes that could not be copied. | ||
288 | * On success, this will be zero. | ||
289 | * | ||
290 | * An alternate version - __copy_to_user_inatomic() - is designed | ||
291 | * to be called from atomic context, typically bracketed by calls | ||
292 | * to pagefault_disable() and pagefault_enable(). | ||
293 | */ | ||
294 | extern unsigned long __must_check __copy_to_user_inatomic( | ||
295 | void __user *to, const void *from, unsigned long n); | ||
296 | |||
297 | static inline unsigned long __must_check | ||
298 | __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
299 | { | ||
300 | might_fault(); | ||
301 | return __copy_to_user_inatomic(to, from, n); | ||
302 | } | ||
303 | |||
304 | static inline unsigned long __must_check | ||
305 | copy_to_user(void __user *to, const void *from, unsigned long n) | ||
306 | { | ||
307 | if (access_ok(VERIFY_WRITE, to, n)) | ||
308 | n = __copy_to_user(to, from, n); | ||
309 | return n; | ||
310 | } | ||
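A hedged sketch of the typical copy-out pattern (the hyp_status type and report_status name are illustrative only):

/* Sketch: hand a result structure back to user space. */
struct hyp_status { int state; int errcount; };

static inline int report_status(struct hyp_status __user *dst,
				const struct hyp_status *src)
{
	if (copy_to_user(dst, src, sizeof(*src)))
		return -EFAULT;		/* nonzero return = bytes not copied */
	return 0;
}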
311 | |||
312 | /** | ||
313 | * __copy_from_user() - copy data from user space, with less checking. | ||
314 | * @to: Destination address, in kernel space. | ||
315 | * @from: Source address, in user space. | ||
316 | * @n: Number of bytes to copy. | ||
317 | * | ||
318 | * Context: User context only. This function may sleep. | ||
319 | * | ||
320 | * Copy data from user space to kernel space. Caller must check | ||
321 | * the specified block with access_ok() before calling this function. | ||
322 | * | ||
323 | * Returns number of bytes that could not be copied. | ||
324 | * On success, this will be zero. | ||
325 | * | ||
326 | * If some data could not be copied, this function will pad the copied | ||
327 | * data to the requested size using zero bytes. | ||
328 | * | ||
329 | * An alternate version - __copy_from_user_inatomic() - is designed | ||
330 | * to be called from atomic context, typically bracketed by calls | ||
331 | * to pagefault_disable() and pagefault_enable(). This version | ||
332 | * does *NOT* pad with zeros. | ||
333 | */ | ||
334 | extern unsigned long __must_check __copy_from_user_inatomic( | ||
335 | void *to, const void __user *from, unsigned long n); | ||
336 | extern unsigned long __must_check __copy_from_user_zeroing( | ||
337 | void *to, const void __user *from, unsigned long n); | ||
338 | |||
339 | static inline unsigned long __must_check | ||
340 | __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
341 | { | ||
342 | might_fault(); | ||
343 | return __copy_from_user_zeroing(to, from, n); | ||
344 | } | ||
345 | |||
346 | static inline unsigned long __must_check | ||
347 | _copy_from_user(void *to, const void __user *from, unsigned long n) | ||
348 | { | ||
349 | if (access_ok(VERIFY_READ, from, n)) | ||
350 | n = __copy_from_user(to, from, n); | ||
351 | else | ||
352 | memset(to, 0, n); | ||
353 | return n; | ||
354 | } | ||
355 | |||
356 | #ifdef CONFIG_DEBUG_COPY_FROM_USER | ||
357 | extern void copy_from_user_overflow(void) | ||
358 | __compiletime_warning("copy_from_user() size is not provably correct"); | ||
359 | |||
360 | static inline unsigned long __must_check copy_from_user(void *to, | ||
361 | const void __user *from, | ||
362 | unsigned long n) | ||
363 | { | ||
364 | int sz = __compiletime_object_size(to); | ||
365 | |||
366 | if (likely(sz == -1 || sz >= n)) | ||
367 | n = _copy_from_user(to, from, n); | ||
368 | else | ||
369 | copy_from_user_overflow(); | ||
370 | |||
371 | return n; | ||
372 | } | ||
373 | #else | ||
374 | #define copy_from_user _copy_from_user | ||
375 | #endif | ||
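And the corresponding hedged copy-in sketch (hyp_request and fetch_request are invented names); note that on a partial failure the destination is zero-padded, as described above.

/* Sketch: pull a request structure in from user space. */
struct hyp_request { unsigned int op; unsigned long arg; };

static inline int fetch_request(struct hyp_request *req,
				const struct hyp_request __user *src)
{
	if (copy_from_user(req, src, sizeof(*req)))
		return -EFAULT;		/* partially copied data is zeroed */
	return 0;
}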
376 | |||
377 | #ifdef __tilegx__ | ||
378 | /** | ||
379 | * __copy_in_user() - copy data within user space, with less checking. | ||
380 | * @to: Destination address, in user space. | ||
381 | * @from: Source address, in user space. | ||
382 | * @n: Number of bytes to copy. | ||
383 | * | ||
384 | * Context: User context only. This function may sleep. | ||
385 | * | ||
386 | * Copy data from user space to user space. Caller must check | ||
387 | * the specified blocks with access_ok() before calling this function. | ||
388 | * | ||
389 | * Returns number of bytes that could not be copied. | ||
390 | * On success, this will be zero. | ||
391 | */ | ||
392 | extern unsigned long __copy_in_user_asm( | ||
393 | void __user *to, const void __user *from, unsigned long n); | ||
394 | |||
395 | static inline unsigned long __must_check | ||
396 | __copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
397 | { | ||
398 | might_sleep(); | ||
399 | return __copy_in_user_asm(to, from, n); | ||
400 | } | ||
401 | |||
402 | static inline unsigned long __must_check | ||
403 | copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
404 | { | ||
405 | if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) | ||
406 | n = __copy_in_user(to, from, n); | ||
407 | return n; | ||
408 | } | ||
409 | #endif | ||
410 | |||
411 | |||
412 | /** | ||
413 | * strlen_user: - Get the size of a string in user space. | ||
414 | * @str: The string to measure. | ||
415 | * | ||
416 | * Context: User context only. This function may sleep. | ||
417 | * | ||
418 | * Get the size of a NUL-terminated string in user space. | ||
419 | * | ||
420 | * Returns the size of the string INCLUDING the terminating NUL. | ||
421 | * On exception, returns 0. | ||
422 | * | ||
423 | * If there is a limit on the length of a valid string, you may wish to | ||
424 | * consider using strnlen_user() instead. | ||
425 | */ | ||
426 | extern long strnlen_user_asm(const char __user *str, long n); | ||
427 | static inline long __must_check strnlen_user(const char __user *str, long n) | ||
428 | { | ||
429 | might_fault(); | ||
430 | return strnlen_user_asm(str, n); | ||
431 | } | ||
432 | #define strlen_user(str) strnlen_user(str, LONG_MAX) | ||
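A hedged sketch of bounding a user string before copying it (user_name_len is an invented helper; the PAGE_SIZE bound is an arbitrary illustrative limit):

/* Sketch: measure a user string, treating a fault as -EFAULT. */
static inline long user_name_len(const char __user *name)
{
	long len = strnlen_user(name, PAGE_SIZE);

	return len ? len : -EFAULT;	/* 0 means the access faulted */
}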
433 | |||
434 | /** | ||
435 | * strncpy_from_user: - Copy a NUL-terminated string from userspace, with less checking. | ||
436 | * @dst: Destination address, in kernel space. This buffer must be at | ||
437 | * least @count bytes long. | ||
438 | * @src: Source address, in user space. | ||
439 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
440 | * | ||
441 | * Copies a NUL-terminated string from userspace to kernel space. | ||
442 | * Caller must check the specified block with access_ok() before calling | ||
443 | * this function. | ||
444 | * | ||
445 | * On success, returns the length of the string (not including the trailing | ||
446 | * NUL). | ||
447 | * | ||
448 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
449 | * copied). | ||
450 | * | ||
451 | * If @count is smaller than the length of the string, copies @count bytes | ||
452 | * and returns @count. | ||
453 | */ | ||
454 | extern long strncpy_from_user_asm(char *dst, const char __user *src, long); | ||
455 | static inline long __must_check __strncpy_from_user( | ||
456 | char *dst, const char __user *src, long count) | ||
457 | { | ||
458 | might_fault(); | ||
459 | return strncpy_from_user_asm(dst, src, count); | ||
460 | } | ||
461 | static inline long __must_check strncpy_from_user( | ||
462 | char *dst, const char __user *src, long count) | ||
463 | { | ||
464 | if (access_ok(VERIFY_READ, src, 1)) | ||
465 | return __strncpy_from_user(dst, src, count); | ||
466 | return -EFAULT; | ||
467 | } | ||
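A hedged usage sketch (fetch_name and its buffer handling are illustrative): a return value equal to the buffer size means the copy was truncated and the buffer is not yet NUL-terminated.

/* Sketch: copy a user-supplied name into a fixed kernel buffer. */
static inline long fetch_name(char *buf, long buflen,
			      const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, buflen);

	if (len == buflen)
		buf[buflen - 1] = '\0';	/* truncated: force termination */
	return len;			/* -EFAULT on a bad user pointer */
}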
468 | |||
469 | /** | ||
470 | * clear_user: - Zero a block of memory in user space. | ||
471 | * @mem: Destination address, in user space. | ||
472 | * @len: Number of bytes to zero. | ||
473 | * | ||
474 | * Zero a block of memory in user space. | ||
475 | * | ||
476 | * Returns number of bytes that could not be cleared. | ||
477 | * On success, this will be zero. | ||
478 | */ | ||
479 | extern unsigned long clear_user_asm(void __user *mem, unsigned long len); | ||
480 | static inline unsigned long __must_check __clear_user( | ||
481 | void __user *mem, unsigned long len) | ||
482 | { | ||
483 | might_fault(); | ||
484 | return clear_user_asm(mem, len); | ||
485 | } | ||
486 | static inline unsigned long __must_check clear_user( | ||
487 | void __user *mem, unsigned long len) | ||
488 | { | ||
489 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
490 | return __clear_user(mem, len); | ||
491 | return len; | ||
492 | } | ||
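A hedged one-liner sketch (wipe_user_buf is an invented name) that converts the bytes-remaining return value into an error code:

/* Sketch: zero a user buffer, reporting any shortfall as -EFAULT. */
static inline int wipe_user_buf(void __user *buf, unsigned long len)
{
	return clear_user(buf, len) ? -EFAULT : 0;
}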
493 | |||
494 | /** | ||
495 | * flush_user: - Flush a block of memory in user space from cache. | ||
496 | * @mem: Destination address, in user space. | ||
497 | * @len: Number of bytes to flush. | ||
498 | * | ||
499 | * Returns number of bytes that could not be flushed. | ||
500 | * On success, this will be zero. | ||
501 | */ | ||
502 | extern unsigned long flush_user_asm(void __user *mem, unsigned long len); | ||
503 | static inline unsigned long __must_check __flush_user( | ||
504 | void __user *mem, unsigned long len) | ||
505 | { | ||
506 | int retval; | ||
507 | |||
508 | might_fault(); | ||
509 | retval = flush_user_asm(mem, len); | ||
510 | mb_incoherent(); | ||
511 | return retval; | ||
512 | } | ||
513 | |||
514 | static inline unsigned long __must_check flush_user( | ||
515 | void __user *mem, unsigned long len) | ||
516 | { | ||
517 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
518 | return __flush_user(mem, len); | ||
519 | return len; | ||
520 | } | ||
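A hedged sketch of the same pattern for the cache operations (publish_user_buf is an invented name); whether flush, inv, or finv is appropriate depends on whether the data must be written back, discarded, or both:

/* Sketch: push freshly written user data out of this core's cache. */
static inline int publish_user_buf(void __user *buf, unsigned long len)
{
	return flush_user(buf, len) ? -EFAULT : 0;
}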
521 | |||
522 | /** | ||
523 | * inv_user: - Invalidate a block of memory in user space from cache. | ||
524 | * @mem: Destination address, in user space. | ||
525 | * @len: Number of bytes to invalidate. | ||
526 | * | ||
527 | * Returns number of bytes that could not be invalidated. | ||
528 | * On success, this will be zero. | ||
529 | * | ||
530 | * Note that on Tile64, the "inv" operation is in fact a | ||
531 | * "flush and invalidate", so cache write-backs will occur prior | ||
532 | * to the cache being marked invalid. | ||
533 | */ | ||
534 | extern unsigned long inv_user_asm(void __user *mem, unsigned long len); | ||
535 | static inline unsigned long __must_check __inv_user( | ||
536 | void __user *mem, unsigned long len) | ||
537 | { | ||
538 | int retval; | ||
539 | |||
540 | might_fault(); | ||
541 | retval = inv_user_asm(mem, len); | ||
542 | mb_incoherent(); | ||
543 | return retval; | ||
544 | } | ||
545 | static inline unsigned long __must_check inv_user( | ||
546 | void __user *mem, unsigned long len) | ||
547 | { | ||
548 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
549 | return __inv_user(mem, len); | ||
550 | return len; | ||
551 | } | ||
552 | |||
553 | /** | ||
554 | * finv_user: - Flush-inval a block of memory in user space from cache. | ||
555 | * @mem: Destination address, in user space. | ||
556 | * @len: Number of bytes to invalidate. | ||
557 | * | ||
558 | * Returns number of bytes that could not be flush-invalidated. | ||
559 | * On success, this will be zero. | ||
560 | */ | ||
561 | extern unsigned long finv_user_asm(void __user *mem, unsigned long len); | ||
562 | static inline unsigned long __must_check __finv_user( | ||
563 | void __user *mem, unsigned long len) | ||
564 | { | ||
565 | int retval; | ||
566 | |||
567 | might_fault(); | ||
568 | retval = finv_user_asm(mem, len); | ||
569 | mb_incoherent(); | ||
570 | return retval; | ||
571 | } | ||
572 | static inline unsigned long __must_check finv_user( | ||
573 | void __user *mem, unsigned long len) | ||
574 | { | ||
575 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
576 | return __finv_user(mem, len); | ||
577 | return len; | ||
578 | } | ||
579 | |||
580 | #endif /* _ASM_TILE_UACCESS_H */ | ||
diff --git a/arch/tile/include/asm/ucontext.h b/arch/tile/include/asm/ucontext.h new file mode 100644 index 00000000000..9bc07b9f30f --- /dev/null +++ b/arch/tile/include/asm/ucontext.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ucontext.h> | |||
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h new file mode 100644 index 00000000000..137e2de5b10 --- /dev/null +++ b/arch/tile/include/asm/unaligned.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UNALIGNED_H | ||
16 | #define _ASM_TILE_UNALIGNED_H | ||
17 | |||
18 | #include <linux/unaligned/le_struct.h> | ||
19 | #include <linux/unaligned/be_byteshift.h> | ||
20 | #include <linux/unaligned/generic.h> | ||
21 | #define get_unaligned __get_unaligned_le | ||
22 | #define put_unaligned __put_unaligned_le | ||
23 | |||
24 | #endif /* _ASM_TILE_UNALIGNED_H */ | ||
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h new file mode 100644 index 00000000000..f2e3ff48533 --- /dev/null +++ b/arch/tile/include/asm/unistd.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL) | ||
16 | #define _ASM_TILE_UNISTD_H | ||
17 | |||
18 | #ifndef __LP64__ | ||
19 | /* Use the flavor of this syscall that matches the 32-bit API better. */ | ||
20 | #define __ARCH_WANT_SYNC_FILE_RANGE2 | ||
21 | #endif | ||
22 | |||
23 | /* Use the standard ABI for syscalls. */ | ||
24 | #include <asm-generic/unistd.h> | ||
25 | |||
26 | /* Additional Tilera-specific syscalls. */ | ||
27 | #define __NR_flush_cache (__NR_arch_specific_syscall + 1) | ||
28 | __SYSCALL(__NR_flush_cache, sys_flush_cache) | ||
29 | |||
30 | #ifndef __tilegx__ | ||
31 | /* "Fast" syscalls provide atomic support for 32-bit chips. */ | ||
32 | #define __NR_FAST_cmpxchg -1 | ||
33 | #define __NR_FAST_atomic_update -2 | ||
34 | #define __NR_FAST_cmpxchg64 -3 | ||
35 | #define __NR_cmpxchg_badaddr (__NR_arch_specific_syscall + 0) | ||
36 | __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr) | ||
37 | #endif | ||
38 | |||
39 | #ifdef __KERNEL__ | ||
40 | /* In compat mode, we use sys_llseek() for compat_sys_llseek(). */ | ||
41 | #ifdef CONFIG_COMPAT | ||
42 | #define __ARCH_WANT_SYS_LLSEEK | ||
43 | #endif | ||
44 | #endif | ||
45 | |||
46 | #endif /* _ASM_TILE_UNISTD_H */ | ||
diff --git a/arch/tile/include/asm/user.h b/arch/tile/include/asm/user.h new file mode 100644 index 00000000000..cbc8b4d5a5c --- /dev/null +++ b/arch/tile/include/asm/user.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_USER_H | ||
17 | #define _ASM_TILE_USER_H | ||
18 | |||
19 | /* This header is for a.out file formats, which TILE does not support. */ | ||
20 | |||
21 | #endif /* _ASM_TILE_USER_H */ | ||
diff --git a/arch/tile/include/asm/xor.h b/arch/tile/include/asm/xor.h new file mode 100644 index 00000000000..c82eb12a5b1 --- /dev/null +++ b/arch/tile/include/asm/xor.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/xor.h> | |||