Diffstat (limited to 'arch/xtensa/include')
30 files changed, 733 insertions, 198 deletions
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 24f50cada70c..c3f289174c10 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -66,19 +66,35 @@
  */
 static inline void atomic_add(int i, atomic_t * v)
 {
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-                "l32i    %0, %2, 0 \n\t"
-                "add     %0, %0, %1 \n\t"
-                "s32i    %0, %2, 0 \n\t"
-                "wsr     a15, ps \n\t"
-                "rsync \n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
+#if XCHAL_HAVE_S32C1I
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       add     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (i), "a" (v)
+                        : "memory"
+                        );
+#else
+        unsigned int vval;
+
+        __asm__ __volatile__(
+                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %2, 0\n"
+                        "       add     %0, %0, %1\n"
+                        "       s32i    %0, %2, 0\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (vval)
+                        : "a" (i), "a" (v)
+                        : "a15", "memory"
+                        );
+#endif
 }
 
 /**
@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v)
  */
 static inline void atomic_sub(int i, atomic_t *v)
 {
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-                "l32i    %0, %2, 0 \n\t"
-                "sub     %0, %0, %1 \n\t"
-                "s32i    %0, %2, 0 \n\t"
-                "wsr     a15, ps \n\t"
-                "rsync \n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
+#if XCHAL_HAVE_S32C1I
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       sub     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (i), "a" (v)
+                        : "memory"
+                        );
+#else
+        unsigned int vval;
+
+        __asm__ __volatile__(
+                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %2, 0\n"
+                        "       sub     %0, %0, %1\n"
+                        "       s32i    %0, %2, 0\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (vval)
+                        : "a" (i), "a" (v)
+                        : "a15", "memory"
+                        );
+#endif
 }
 
 /*
@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v)
 
 static inline int atomic_add_return(int i, atomic_t * v)
 {
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
-                "l32i    %0, %2, 0 \n\t"
-                "add     %0, %0, %1 \n\t"
-                "s32i    %0, %2, 0 \n\t"
-                "wsr     a15, ps \n\t"
-                "rsync \n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
-
-        return vval;
+#if XCHAL_HAVE_S32C1I
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       add     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        "       add     %0, %0, %2\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (i), "a" (v)
+                        : "memory"
+                        );
+
+        return result;
+#else
+        unsigned int vval;
+
+        __asm__ __volatile__(
+                        "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %2, 0\n"
+                        "       add     %0, %0, %1\n"
+                        "       s32i    %0, %2, 0\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (vval)
+                        : "a" (i), "a" (v)
+                        : "a15", "memory"
+                        );
+
+        return vval;
+#endif
 }
 
 static inline int atomic_sub_return(int i, atomic_t * v)
 {
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
-                "l32i    %0, %2, 0 \n\t"
-                "sub     %0, %0, %1 \n\t"
-                "s32i    %0, %2, 0 \n\t"
-                "wsr     a15, ps \n\t"
-                "rsync \n"
-                : "=&a" (vval)
-                : "a" (i), "a" (v)
-                : "a15", "memory"
-                );
-
-        return vval;
+#if XCHAL_HAVE_S32C1I
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       sub     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        "       sub     %0, %0, %2\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (i), "a" (v)
+                        : "memory"
+                        );
+
+        return result;
+#else
+        unsigned int vval;
+
+        __asm__ __volatile__(
+                        "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %2, 0\n"
+                        "       sub     %0, %0, %1\n"
+                        "       s32i    %0, %2, 0\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (vval)
+                        : "a" (i), "a" (v)
+                        : "a15", "memory"
+                        );
+
+        return vval;
+#endif
 }
 
 /**
@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-        unsigned int all_f = -1;
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
-                "l32i    %0, %2, 0 \n\t"
-                "xor     %1, %4, %3 \n\t"
-                "and     %0, %0, %4 \n\t"
-                "s32i    %0, %2, 0 \n\t"
-                "wsr     a15, ps \n\t"
-                "rsync \n"
-                : "=&a" (vval), "=a" (mask)
-                : "a" (v), "a" (all_f), "1" (mask)
-                : "a15", "memory"
-                );
+#if XCHAL_HAVE_S32C1I
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       and     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (~mask), "a" (v)
+                        : "memory"
+                        );
+#else
+        unsigned int all_f = -1;
+        unsigned int vval;
+
+        __asm__ __volatile__(
+                        "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %2, 0\n"
+                        "       xor     %1, %4, %3\n"
+                        "       and     %0, %0, %4\n"
+                        "       s32i    %0, %2, 0\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (vval), "=a" (mask)
+                        : "a" (v), "a" (all_f), "1" (mask)
+                        : "a15", "memory"
+                        );
+#endif
 }
 
 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                "rsil    a15,"__stringify(LOCKLEVEL)"\n\t"
-                "l32i    %0, %2, 0 \n\t"
-                "or      %0, %0, %1 \n\t"
-                "s32i    %0, %2, 0 \n\t"
-                "wsr     a15, ps \n\t"
-                "rsync \n"
-                : "=&a" (vval)
-                : "a" (mask), "a" (v)
-                : "a15", "memory"
-                );
+#if XCHAL_HAVE_S32C1I
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       or      %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (mask), "a" (v)
+                        : "memory"
+                        );
+#else
+        unsigned int vval;
+
+        __asm__ __volatile__(
+                        "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %2, 0\n"
+                        "       or      %0, %0, %1\n"
+                        "       s32i    %0, %2, 0\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (vval)
+                        : "a" (mask), "a" (v)
+                        : "a15", "memory"
+                        );
+#endif
 }
 
 /* Atomic operations are already serializing */
@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
-
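The S32C1I branches above all use the same compare-and-swap retry loop: load the current value, arm the SCOMPARE1 special register with it, compute the new value, and let s32c1i store it only if the word in memory still equals SCOMPARE1; the bne retries on interference. A rough C11-atomics analogue of the new atomic_add (an illustrative sketch only, not kernel code; the function name is made up):

    #include <stdatomic.h>

    static void sketch_atomic_add(int i, _Atomic int *v)
    {
            int old = atomic_load_explicit(v, memory_order_relaxed);

            /* compare_exchange plays the role of "wsr scompare1" plus
             * "s32c1i": it stores old + i only if *v still equals old;
             * on failure it reloads old and the loop retries, like the
             * "bne %0, %1, 1b" branch back to label 1. */
            while (!atomic_compare_exchange_weak_explicit(
                            v, &old, old + i,
                            memory_order_relaxed, memory_order_relaxed))
                    ;
    }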
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 55707a8009d3..ef021677d536 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
  */
 
 #ifndef _XTENSA_SYSTEM_H
@@ -12,8 +12,8 @@
 #define smp_read_barrier_depends() do { } while(0)
 #define read_barrier_depends() do { } while(0)
 
-#define mb()  barrier()
-#define rmb() mb()
+#define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
 #define wmb() mb()
 
 #ifdef CONFIG_SMP
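With this change mb() (and therefore wmb()) emits a real memw memory-ordering instruction instead of a pure compiler barrier, while rmb() is relaxed to barrier(). A minimal producer-side sketch of why wmb() matters (illustrative only; buf and its fields are hypothetical):

    /* Publish the payload before the flag that announces it. */
    buf->data = value;
    wmb();          /* memw: keep the data store ahead of the flag store */
    buf->ready = 1;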
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 5270197ddd36..84afe58d5d37 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -29,7 +29,6 @@
 #define smp_mb__before_clear_bit()      barrier()
 #define smp_mb__after_clear_bit()       barrier()
 
-#include <asm-generic/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
 
 #if XCHAL_HAVE_NSA
@@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word)
 #endif
 
 #include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       or      %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (tmp), "=&a" (value)
+                        : "a" (mask), "a" (p)
+                        : "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       and     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (tmp), "=&a" (value)
+                        : "a" (~mask), "a" (p)
+                        : "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       xor     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (tmp), "=&a" (value)
+                        : "a" (mask), "a" (p)
+                        : "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       or      %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (tmp), "=&a" (value)
+                        : "a" (mask), "a" (p)
+                        : "memory");
+
+        return tmp & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       and     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (tmp), "=&a" (value)
+                        : "a" (~mask), "a" (p)
+                        : "memory");
+
+        return tmp & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %3, 0\n"
+                        "       wsr     %1, scompare1\n"
+                        "       xor     %0, %1, %2\n"
+                        "       s32c1i  %0, %3, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (tmp), "=&a" (value)
+                        : "a" (mask), "a" (p)
+                        : "memory");
+
+        return tmp & mask;
+}
+
+#else
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/le.h>
 
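Each of the new bitops first locates the 32-bit word that holds the bit (p += bit >> 5) and builds a one-bit mask (1UL << (bit & 31)), then runs the s32c1i loop with or/and/xor on that word. A plain-C model of the arithmetic and of test_and_set_bit's return value (illustrative sketch only, deliberately non-atomic):

    static int sketch_test_and_set_bit(unsigned int bit, unsigned long *p)
    {
            unsigned long mask = 1UL << (bit & 31); /* position inside word */
            unsigned long *word = p + (bit >> 5);   /* word inside array */
            unsigned long old = *word;

            *word = old | mask;
            return old & mask;      /* nonzero iff the bit was already set */
    }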
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
index 9983f2c1b7ee..0c25799facab 100644
--- a/arch/xtensa/include/asm/bootparam.h
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -22,6 +22,7 @@
 #define BP_TAG_MEMORY           0x1003  /* memory addr and size (bp_meminfo) */
 #define BP_TAG_SERIAL_BAUSRATE  0x1004  /* baud rate of current console. */
 #define BP_TAG_SERIAL_PORT      0x1005  /* serial device of current console */
+#define BP_TAG_FDT              0x1006  /* flat device tree addr */
 
 #define BP_TAG_FIRST            0x7B0B  /* first tag with a version number */
 #define BP_TAG_LAST             0x7E0B  /* last tag */
@@ -31,15 +32,15 @@
 /* All records are aligned to 4 bytes */
 
 typedef struct bp_tag {
-  unsigned short id;      /* tag id */
-  unsigned short size;    /* size of this record excluding the structure*/
-  unsigned long data[0];  /* data */
+        unsigned short id;      /* tag id */
+        unsigned short size;    /* size of this record excluding the structure*/
+        unsigned long data[0];  /* data */
 } bp_tag_t;
 
 typedef struct meminfo {
-  unsigned long type;
-  unsigned long start;
-  unsigned long end;
+        unsigned long type;
+        unsigned long start;
+        unsigned long end;
 } meminfo_t;
 
 #define SYSMEM_BANKS_MAX 5
@@ -48,14 +49,11 @@ typedef struct meminfo {
 #define MEMORY_TYPE_NONE                0x2000
 
 typedef struct sysmem_info {
-  int nr_banks;
-  meminfo_t bank[SYSMEM_BANKS_MAX];
+        int nr_banks;
+        meminfo_t bank[SYSMEM_BANKS_MAX];
 } sysmem_info_t;
 
 extern sysmem_info_t sysmem;
 
 #endif
 #endif
-
-
-
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index 2c20a58f94cd..60e18773ecb8 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -174,4 +174,3 @@
         __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
 
         .endm
-
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 569fec4f9a20..127cd48883c4 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE       1
 extern void flush_dcache_page(struct page*);
 extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
-extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
+extern void flush_cache_page(struct vm_area_struct*,
+                             unsigned long, unsigned long);
 
 #else
 
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index e4d831a30772..aed7ad68ca46 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -36,8 +36,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
 
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
-                                                   int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+                                            int len, __wsum sum,
+                                            int *src_err_ptr, int *dst_err_ptr);
 
 /*
  * Note: when you get a NULL pointer exception here this means someone
@@ -54,7 +55,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 
 static inline
 __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-                                          int len, __wsum sum, int *err_ptr)
+                                   int len, __wsum sum, int *err_ptr)
 {
         return csum_partial_copy_generic((__force const void *)src, dst,
                                         len, sum, err_ptr, NULL);
@@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
         /* Since the input registers which are loaded with iph and ihl
            are modified, we must also specify them as outputs, or gcc
            will assume they contain their original values. */
-        : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
+        : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+          "=&r" (endaddr)
         : "1" (iph), "2" (ihl)
         : "memory");
 
@@ -168,7 +170,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 
 static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
 {
-    return csum_fold (csum_partial(buff, len, 0));
+        return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
  *      Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
-                                    int len, __wsum sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+                                               void __user *dst, int len,
+                                               __wsum sum, int *err_ptr)
 {
         if (access_ok(VERIFY_WRITE, dst, len))
-                return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+                return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
 
         if (len)
                 *err_ptr = -EFAULT;
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 64dad04a9d27..d9ab131bc1aa 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -22,17 +22,30 @@
 static inline unsigned long
 __cmpxchg_u32(volatile int *p, int old, int new)
 {
-  __asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-                       "l32i    %0, %1, 0              \n\t"
-                       "bne     %0, %2, 1f             \n\t"
-                       "s32i    %3, %1, 0              \n\t"
-                       "1:                             \n\t"
-                       "wsr     a15, ps                \n\t"
-                       "rsync                          \n\t"
-                       : "=&a" (old)
-                       : "a" (p), "a" (old), "r" (new)
-                       : "a15", "memory");
-  return old;
+#if XCHAL_HAVE_S32C1I
+        __asm__ __volatile__(
+                        "       wsr     %2, scompare1\n"
+                        "       s32c1i  %0, %1, 0\n"
+                        : "+a" (new)
+                        : "a" (p), "a" (old)
+                        : "memory"
+                        );
+
+        return new;
+#else
+        __asm__ __volatile__(
+                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %1, 0\n"
+                        "       bne     %0, %2, 1f\n"
+                        "       s32i    %3, %1, 0\n"
+                        "1:\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (old)
+                        : "a" (p), "a" (old), "r" (new)
+                        : "a15", "memory");
+        return old;
+#endif
 }
 /* This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg(). */
@@ -93,19 +106,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 
 static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
-  unsigned long tmp;
-  __asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-                       "l32i    %0, %1, 0              \n\t"
-                       "s32i    %2, %1, 0              \n\t"
-                       "wsr     a15, ps                \n\t"
-                       "rsync                          \n\t"
-                       : "=&a" (tmp)
-                       : "a" (m), "a" (val)
-                       : "a15", "memory");
-  return tmp;
+#if XCHAL_HAVE_S32C1I
+        unsigned long tmp, result;
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %2, 0\n"
+                        "       mov     %0, %3\n"
+                        "       wsr     %1, scompare1\n"
+                        "       s32c1i  %0, %2, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (m), "a" (val)
+                        : "memory"
+                        );
+        return result;
+#else
+        unsigned long tmp;
+        __asm__ __volatile__(
+                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+                        "       l32i    %0, %1, 0\n"
+                        "       s32i    %2, %1, 0\n"
+                        "       wsr     a15, ps\n"
+                        "       rsync\n"
+                        : "=&a" (tmp)
+                        : "a" (m), "a" (val)
+                        : "a15", "memory");
+        return tmp;
+#endif
 }
 
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 /*
  * This only works if the compiler isn't horribly bad at optimizing.
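With S32C1I, __cmpxchg_u32 maps one-to-one onto the hardware: SCOMPARE1 is armed with the expected value, and s32c1i both attempts to store the replacement and leaves the value actually found in memory in its register, which is what gets returned. Callers therefore detect success by comparing the return value against the expected one; a hypothetical usage sketch (counter is a made-up variable):

    /* Lock-free increment built on cmpxchg. */
    int old, seen;

    do {
            old = counter;                          /* snapshot */
            seen = cmpxchg(&counter, old, old + 1); /* returns prior value */
    } while (seen != old);                          /* retry on interference */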
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 8d1eb5d78649..47e46dcf5d49 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void)
 
 #define GET_CURRENT(reg,sp)             \
         GET_THREAD_INFO(reg,sp);        \
-        l32i reg, reg, TI_TASK          \
+        l32i reg, reg, TI_TASK          \
 
 #endif
 
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
index 58c0a4fd4003..61fc5faeb46c 100644
--- a/arch/xtensa/include/asm/delay.h
+++ b/arch/xtensa/include/asm/delay.h
@@ -19,9 +19,9 @@ extern unsigned long loops_per_jiffy;
 
 static inline void __delay(unsigned long loops)
 {
-  /* 2 cycles per loop. */
-  __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
-                        : "=r" (loops) : "0" (loops));
+        /* 2 cycles per loop. */
+        __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+                              : "=r" (loops) : "0" (loops));
 }
 
 static __inline__ u32 xtensa_get_ccount(void)
@@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs)
 }
 
 #endif
-
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 492c95790ad5..4acb5feba1fb 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 
+#define DMA_ERROR_CODE          (~(dma_addr_t)0x0)
+
 /*
  * DMA-consistent mapping functions.
  */
@@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
 }
 
 static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+                           size_t size, enum dma_data_direction direction)
 {
         consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
 }
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index 5293312bc6a4..264d5fa450d8 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -168,11 +168,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
  */
 
 #define ELF_PLAT_INIT(_r, load_addr) \
-  do { _r->areg[0]=0;  /*_r->areg[1]=0;*/ _r->areg[2]=0;  _r->areg[3]=0;  \
-       _r->areg[4]=0;  _r->areg[5]=0;    _r->areg[6]=0;  _r->areg[7]=0;  \
-       _r->areg[8]=0;  _r->areg[9]=0;    _r->areg[10]=0; _r->areg[11]=0; \
-       _r->areg[12]=0; _r->areg[13]=0;   _r->areg[14]=0; _r->areg[15]=0; \
-  } while (0)
+        do { _r->areg[0]=0;  /*_r->areg[1]=0;*/ _r->areg[2]=0;  _r->areg[3]=0;  \
+             _r->areg[4]=0;  _r->areg[5]=0;    _r->areg[6]=0;  _r->areg[7]=0;  \
+             _r->areg[8]=0;  _r->areg[9]=0;    _r->areg[10]=0; _r->areg[11]=0; \
+             _r->areg[12]=0; _r->areg[13]=0;   _r->areg[14]=0; _r->areg[15]=0; \
+        } while (0)
 
 typedef struct {
         xtregs_opt_t    opt;
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 0a046ca5a687..80be15124697 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -14,4 +14,3 @@
 extern void flush_cache_kmaps(void);
 
 #endif
-
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 000000000000..e1f8ba4061ed
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,55 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMU's.
+ *
+ * For the MMU we also support a new configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ *   Marc Gauthier <marc@tensilica.com>
+ *   Pete Delaney <piet@tensilica.com>
+ */
+
+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+        .macro  initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
+ * For details see Documentation/xtensa/atomctl.txt
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+        movi    a3, 0x25        /* For SMP/MX -- internal for writeback,
+                                 * RCW otherwise
+                                 */
+#else
+        movi    a3, 0x29        /* non-MX -- Most cores use Std Memory
+                                 * Controllers which usually can't use RCW
+                                 */
+#endif
+        wsr     a3, atomctl
+#endif  /* XCHAL_HAVE_S32C1I &&
+         * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+         */
+
+        .endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
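The ATOMCTL register written here packs 2-bit fields that select how S32C1I is carried out for each kind of memory access (raise an exception, run an RCW transaction, or perform the operation internally); Documentation/xtensa/atomctl.txt has the authoritative field layout. Splitting the two constants into 2-bit groups is plain arithmetic, though which group governs writeback, writethrough, or bypass accesses is an assumption left to that document:

    0x25 = binary 10 01 01  ->  fields 2, 1, 1  (one mode "internal",
                                the others "RCW", matching the comment)
    0x29 = binary 10 10 01  ->  fields 2, 2, 1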
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index feb10af96519..d43525a286bb 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -107,7 +107,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                                struct task_struct *tsk)
+                             struct task_struct *tsk)
 {
         unsigned long asid = asid_cache;
 
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 599e7a2e729d..3407cf7989b7 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
 {
         return 0;
 }
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 7a5591a71f85..47f582333f6b 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -29,19 +29,19 @@
  * PAGE_SHIFT determines the page size
  */
 
-#define PAGE_SHIFT      12
-#define PAGE_SIZE       (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK       (~(PAGE_SIZE-1))
+#define PAGE_SHIFT              12
+#define PAGE_SIZE               (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK               (~(PAGE_SIZE-1))
 
 #ifdef CONFIG_MMU
-#define PAGE_OFFSET     XCHAL_KSEG_CACHED_VADDR
-#define MAX_MEM_PFN     XCHAL_KSEG_SIZE
+#define PAGE_OFFSET             XCHAL_KSEG_CACHED_VADDR
+#define MAX_MEM_PFN             XCHAL_KSEG_SIZE
 #else
-#define PAGE_OFFSET     0
-#define MAX_MEM_PFN     (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define PAGE_OFFSET             0
+#define MAX_MEM_PFN             (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
 #endif
 
-#define PGTABLE_START   0x80000000
+#define PGTABLE_START           0x80000000
 
 /*
  * Cache aliasing:
@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
 
 #define __pa(x)                 ((unsigned long) (x) - PAGE_OFFSET)
 #define __va(x)                 ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#define pfn_valid(pfn)          ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define pfn_valid(pfn) \
+        ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
 #ifdef CONFIG_DISCONTIGMEM
 # error CONFIG_DISCONTIGMEM not supported
 #endif
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
index 00fcbd7c534a..0b68c76ec1e6 100644
--- a/arch/xtensa/include/asm/pci-bridge.h
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -35,7 +35,7 @@ struct pci_space {
 struct pci_controller {
         int index;                      /* used for pci_controller_num */
         struct pci_controller *next;
-        struct pci_bus *bus;
+        struct pci_bus *bus;
         void *arch_data;
 
         int first_busno;
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 05244f07dd31..614be031a79a 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -53,7 +53,7 @@ struct pci_dev;
 
 /* Map a range of PCI memory or I/O space for a device into user space */
 int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
-                enum pci_mmap_state mmap_state, int write_combine);
+                        enum pci_mmap_state mmap_state, int write_combine);
 
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP   1
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 40cf9bceda2c..cf914c8c249a 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -42,7 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 extern struct kmem_cache *pgtable_cache;
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
         return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index b03c043ce75b..c90ea5bfa1b4 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -284,7 +284,7 @@ struct vm_area_struct;
 
 static inline int
 ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
-    pte_t *ptep)
+                          pte_t *ptep)
 {
         pte_t pte = *ptep;
         if (!pte_young(pte))
@@ -304,8 +304,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 static inline void
 ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-  	pte_t pte = *ptep;
-  	update_pte(ptep, pte_wrprotect(pte));
+        pte_t pte = *ptep;
+        update_pte(ptep, pte_wrprotect(pte));
 }
 
 /* to find an entry in a kernel page-table-directory */
@@ -399,7 +399,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
  */
 
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
-                remap_pfn_range(vma, from, pfn, size, prot)
+        remap_pfn_range(vma, from, pfn, size, prot)
 
 typedef pte_t *pte_addr_t;
 
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 7d936e58e9be..ec098b68fb9a 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void);
 extern void platform_calibrate_ccount (void);
 
 #endif  /* _XTENSA_PLATFORM_H */
-
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 2d630e7399ca..e5fb6b0abdf4 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -89,7 +89,7 @@
 #define MAKE_PC_FROM_RA(ra,sp)    (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
 
 typedef struct {
-    unsigned long seg;
+        unsigned long seg;
 } mm_segment_t;
 
 struct thread_struct {
@@ -145,10 +145,10 @@ struct thread_struct {
  * set_thread_state in signal.c depends on it.
  */
 #define USER_PS_VALUE ((1 << PS_WOE_BIT) |                              \
-                       (1 << PS_CALLINC_SHIFT) |                        \
-                       (USER_RING << PS_RING_SHIFT) |                   \
-                       (1 << PS_UM_BIT) |                               \
-                       (1 << PS_EXCM_BIT))
+                (1 << PS_CALLINC_SHIFT) |                               \
+                (USER_RING << PS_RING_SHIFT) |                          \
+                (1 << PS_UM_BIT) |                                      \
+                (1 << PS_EXCM_BIT))
 
 /* Clearing a0 terminates the backtrace. */
 #define start_thread(regs, new_pc, new_sp) \
diff --git a/arch/xtensa/include/asm/prom.h b/arch/xtensa/include/asm/prom.h
new file mode 100644
index 000000000000..f3d7cd2c0de7
--- /dev/null
+++ b/arch/xtensa/include/asm/prom.h
@@ -0,0 +1,6 @@
+#ifndef _XTENSA_ASM_PROM_H
+#define _XTENSA_ASM_PROM_H
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif /* _XTENSA_ASM_PROM_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index da21c17f23aa..58bf6fd3f913 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -37,7 +37,7 @@ struct pt_regs {
         unsigned long windowstart;      /*  52 */
         unsigned long syscall;          /*  56 */
         unsigned long icountlevel;      /*  60 */
-        int reserved[1];                /*  64 */
+        unsigned long scompare1;        /*  64 */
 
         /* Additional configurable registers that are used by the compiler. */
         xtregs_opt_t xtregs_opt;
@@ -55,7 +55,7 @@ struct pt_regs {
 
 # define arch_has_single_step() (1)
 # define task_pt_regs(tsk) ((struct pt_regs*) \
-  (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+        (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
 # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
 # define instruction_pointer(regs) ((regs)->pc)
 
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 8a8aa61ccc8d..76096a4e5b8d 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -52,6 +52,10 @@
 #define EXCCAUSE_SPECULATION            7
 #define EXCCAUSE_PRIVILEGED             8
 #define EXCCAUSE_UNALIGNED              9
+#define EXCCAUSE_INSTR_DATA_ERROR       12
+#define EXCCAUSE_LOAD_STORE_DATA_ERROR  13
+#define EXCCAUSE_INSTR_ADDR_ERROR       14
+#define EXCCAUSE_LOAD_STORE_ADDR_ERROR  15
 #define EXCCAUSE_ITLB_MISS              16
 #define EXCCAUSE_ITLB_MULTIHIT          17
 #define EXCCAUSE_ITLB_PRIVILEGE         18
@@ -105,4 +109,3 @@
 #define DEBUGCAUSE_ICOUNT_BIT           0       /* ICOUNT would incr. to zero */
 
 #endif /* _XTENSA_SPECREG_H */
-
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 8ff23649581b..03975906b36f 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -11,6 +11,192 @@
 #ifndef _XTENSA_SPINLOCK_H
 #define _XTENSA_SPINLOCK_H
 
-#include <linux/spinlock.h>
+/*
+ * spinlock
+ *
+ * There is at most one owner of a spinlock.  There are not different
+ * types of spinlock owners like there are for rwlocks (see below).
+ *
+ * When trying to obtain a spinlock, the function "spins" forever, or busy-
+ * waits, until the lock is obtained.  When spinning, presumably some other
+ * owner will soon give up the spinlock making it available to others.  Use
+ * the trylock functions to avoid spinning forever.
+ *
+ * possible values:
+ *
+ *    0          nobody owns the spinlock
+ *    1          somebody owns the spinlock
+ */
+
+#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#define __raw_spin_unlock_wait(lock) \
+        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                        "       movi    %0, 0\n"
+                        "       wsr     %0, scompare1\n"
+                        "1:     movi    %0, 1\n"
+                        "       s32c1i  %0, %1, 0\n"
+                        "       bnez    %0, 1b\n"
+                        : "=&a" (tmp)
+                        : "a" (&lock->slock)
+                        : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                        "       movi    %0, 0\n"
+                        "       wsr     %0, scompare1\n"
+                        "       movi    %0, 1\n"
+                        "       s32c1i  %0, %1, 0\n"
+                        : "=&a" (tmp)
+                        : "a" (&lock->slock)
+                        : "memory");
+
+        return tmp == 0 ? 1 : 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                        "       movi    %0, 0\n"
+                        "       s32ri   %0, %1, 0\n"
+                        : "=&a" (tmp)
+                        : "a" (&lock->slock)
+                        : "memory");
+}
+
+/*
+ * rwlock
+ *
+ * Read-write locks are really a more flexible spinlock.  They allow
+ * multiple readers but only one writer.  Write ownership is exclusive
+ * (i.e., all other readers and writers are blocked from ownership while
+ * there is a write owner).  These rwlocks are unfair to writers.  Writers
+ * can be starved for an indefinite time by readers.
+ *
+ * possible values:
+ *
+ *   0          nobody owns the rwlock
+ *  >0          one or more readers own the rwlock
+ *                (the positive value is the actual number of readers)
+ *  0x80000000  one writer owns the rwlock, no other writers, no readers
+ */
+
+#define __raw_write_can_lock(x)  ((x)->lock == 0)
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                        "       movi    %0, 0\n"
+                        "       wsr     %0, scompare1\n"
+                        "1:     movi    %0, 1\n"
+                        "       slli    %0, %0, 31\n"
+                        "       s32c1i  %0, %1, 0\n"
+                        "       bnez    %0, 1b\n"
+                        : "=&a" (tmp)
+                        : "a" (&rw->lock)
+                        : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                        "       movi    %0, 0\n"
+                        "       wsr     %0, scompare1\n"
+                        "       movi    %0, 1\n"
+                        "       slli    %0, %0, 31\n"
+                        "       s32c1i  %0, %1, 0\n"
+                        : "=&a" (tmp)
+                        : "a" (&rw->lock)
+                        : "memory");
+
+        return tmp == 0 ? 1 : 0;
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                        "       movi    %0, 0\n"
+                        "       s32ri   %0, %1, 0\n"
+                        : "=&a" (tmp)
+                        : "a" (&rw->lock)
+                        : "memory");
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+        unsigned long tmp;
+        unsigned long result;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %2, 0\n"
+                        "       bltz    %1, 1b\n"
+                        "       wsr     %1, scompare1\n"
+                        "       addi    %0, %1, 1\n"
+                        "       s32c1i  %0, %2, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (&rw->lock)
+                        : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+        unsigned long result;
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                        "       l32i    %1, %2, 0\n"
+                        "       addi    %0, %1, 1\n"
+                        "       bltz    %0, 1f\n"
+                        "       wsr     %1, scompare1\n"
+                        "       s32c1i  %0, %2, 0\n"
+                        "       sub     %0, %0, %1\n"
+                        "1:\n"
+                        : "=&a" (result), "=&a" (tmp)
+                        : "a" (&rw->lock)
+                        : "memory");
+
+        return result == 0;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+        unsigned long tmp1, tmp2;
+
+        __asm__ __volatile__(
+                        "1:     l32i    %1, %2, 0\n"
+                        "       addi    %0, %1, -1\n"
+                        "       wsr     %1, scompare1\n"
+                        "       s32c1i  %0, %2, 0\n"
+                        "       bne     %0, %1, 1b\n"
+                        : "=&a" (tmp1), "=&a" (tmp2)
+                        : "a" (&rw->lock)
+                        : "memory");
+}
 
 #endif  /* _XTENSA_SPINLOCK_H */
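The lock word protocol in the comments (0 free, 1 held) maps directly onto the s32c1i sequences: SCOMPARE1 is armed with 0 ("expected unlocked"), the register holds 1, and because s32c1i returns what was actually in memory, a nonzero result means another owner got there first. A compact C11 analogue of the three spinlock operations (sketch only; the sketch_* names are made up):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { _Atomic unsigned int slock; } sketch_spinlock_t;

    static void sketch_spin_lock(sketch_spinlock_t *l)
    {
            unsigned int expected;

            do {                    /* spin, like the "bnez %0, 1b" retry */
                    expected = 0;
            } while (!atomic_compare_exchange_weak(&l->slock, &expected, 1));
    }

    static bool sketch_spin_trylock(sketch_spinlock_t *l)
    {
            unsigned int expected = 0;

            /* One attempt only: true iff the word was 0 and is now 1. */
            return atomic_compare_exchange_strong(&l->slock, &expected, 1);
    }

    static void sketch_spin_unlock(sketch_spinlock_t *l)
    {
            atomic_store(&l->slock, 0);     /* like the plain s32ri store */
    }

The read-lock side follows the same shape, except that the retry loop also spins while the word is negative (bltz), i.e. while the 0x80000000 writer bit is set, and the CAS increments the reader count instead of storing a constant.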
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index b00c928d4cce..8d5e47fad095 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int,
 /* Should probably move to linux/syscalls.h */
 struct pollfd;
 asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
-        fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
+                             fd_set __user *exp, struct timespec __user *tsp,
+                             void __user *sig);
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
-        struct timespec __user *tsp, const sigset_t __user *sigmask,
-        size_t sigsetsize);
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
-        size_t sigsetsize);
+                          struct timespec __user *tsp,
+                          const sigset_t __user *sigmask,
+                          size_t sigsetsize);
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 000000000000..54f70440185e
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,23 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+#ifndef _XTENSA_TRAPS_H
+#define _XTENSA_TRAPS_H
+
+#include <asm/ptrace.h>
+
+/*
+ * handler must be either of the following:
+ *  void (*)(struct pt_regs *regs);
+ *  void (*)(struct pt_regs *regs, unsigned long exccause);
+ */
+extern void * __init trap_set_handler(int cause, void *handler);
+extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+
+#endif /* _XTENSA_TRAPS_H */
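Given the two handler signatures the comment allows, registering a custom cause handler from early init code would look roughly like this (hypothetical handler and call site; EXCCAUSE_LOAD_STORE_DATA_ERROR comes from the asm/regs.h change above, and trap_set_handler returns the handler it replaces):

    static void my_trap_handler(struct pt_regs *regs, unsigned long exccause)
    {
            /* Handle the fault, or defer to the default reporting path. */
            do_unhandled(regs, exccause);
    }

    static void __init my_platform_trap_init(void)
    {
            trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, my_trap_handler);
    }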
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6e4bb3b791ab..fd686dc45d1a 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -180,7 +180,8 @@
 #define segment_eq(a,b) ((a).seg == (b).seg)
 
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __user_ok(addr,size) \
+        (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
 
@@ -234,10 +235,10 @@ do { \
         int __cb; \
         retval = 0; \
         switch (size) { \
-          case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break; \
-          case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
-          case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
-          case 8: { \
+        case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break; \
+        case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
+        case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
+        case 8: { \
                      __typeof__(*ptr) __v64 = x; \
                      retval = __copy_to_user(ptr,&__v64,8); \
                      break; \
@@ -291,7 +292,7 @@ do { \
  * __check_align_* macros still work.
  */
 #define __put_user_asm(x, addr, err, align, insn, cb)   \
-   __asm__ __volatile__(                                \
+        __asm__ __volatile__(                           \
         __check_align_##align                           \
         "1: "insn"  %2, %3, 0           \n"             \
         "2:                             \n"             \
@@ -301,8 +302,8 @@ do { \
         "   .long  2b                   \n"             \
         "5:                             \n"             \
         "   l32r   %1, 4b               \n"             \
-        "   movi   %0, %4               \n"             \
-        "   jx     %1                   \n"             \
+        "   movi   %0, %4               \n"             \
+        "   jx     %1                   \n"             \
         "   .previous                   \n"             \
         "   .section  __ex_table,\"a\"  \n"             \
         "   .long  1b, 5b               \n"             \
@@ -334,13 +335,13 @@ extern long __get_user_bad(void);
 do { \
         int __cb; \
         retval = 0; \
-        switch (size) { \
-          case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb);  break; \
-          case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
-          case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb);  break; \
-          case 8: retval = __copy_from_user(&x,ptr,8); break; \
-          default: (x) = __get_user_bad(); \
-        } \
+        switch (size) { \
+        case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb);  break; \
+        case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
+        case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb);  break; \
+        case 8: retval = __copy_from_user(&x,ptr,8); break; \
+        default: (x) = __get_user_bad(); \
+        } \
 } while (0)
 
 
@@ -349,7 +350,7 @@ do { \
  * __check_align_* macros still work.
  */
 #define __get_user_asm(x, addr, err, align, insn, cb) \
-   __asm__ __volatile__(                        \
+        __asm__ __volatile__(                   \
         __check_align_##align                   \
         "1: "insn"  %2, %3, 0           \n"     \
         "2:                             \n"     \
@@ -360,8 +361,8 @@ do { \
         "5:                             \n"     \
         "   l32r   %1, 4b               \n"     \
         "   movi   %2, 0                \n"     \
-        "   movi   %0, %4               \n"     \
-        "   jx     %1                   \n"     \
+        "   movi   %0, %4               \n"     \
+        "   jx     %1                   \n"     \
         "   .previous                   \n"     \
         "   .section  __ex_table,\"a\"  \n"     \
         "   .long  1b, 5b               \n"     \
@@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 
 #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
 #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user(to,from,n) \
+        __generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_from_user(to,from,n) \
+        __generic_copy_from_user_nocheck((to),(from),(n))
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 