Diffstat (limited to 'include')

 -rw-r--r--  include/asm-powerpc/bitops.h                                  437
 -rw-r--r--  include/asm-powerpc/bug.h                                      34
 -rw-r--r--  include/asm-powerpc/futex.h (renamed from include/asm-ppc64/futex.h)  45
 -rw-r--r--  include/asm-powerpc/ioctls.h                                    3
 -rw-r--r--  include/asm-powerpc/ipcbuf.h                                   33
 -rw-r--r--  include/asm-powerpc/irq.h                                       2
 -rw-r--r--  include/asm-powerpc/ppc_asm.h                                   7
 -rw-r--r--  include/asm-powerpc/processor.h                                 2
 -rw-r--r--  include/asm-powerpc/rtas.h                                      3
 -rw-r--r--  include/asm-powerpc/termios.h                                 135
 -rw-r--r--  include/asm-powerpc/uaccess.h                                 468
 -rw-r--r--  include/asm-ppc/bitops.h                                      460
 -rw-r--r--  include/asm-ppc/futex.h                                        53
 -rw-r--r--  include/asm-ppc/ipcbuf.h                                       29
 -rw-r--r--  include/asm-ppc/uaccess.h                                     393
 -rw-r--r--  include/asm-ppc64/bitops.h                                    360
 -rw-r--r--  include/asm-ppc64/ipcbuf.h                                     28
 -rw-r--r--  include/asm-ppc64/mmu_context.h                                15
 -rw-r--r--  include/asm-ppc64/nvram.h                                       2
 -rw-r--r--  include/asm-ppc64/smp.h                                         1
 -rw-r--r--  include/asm-ppc64/uaccess.h                                   341

 21 files changed, 994 insertions(+), 1857 deletions(-)
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 000000000000..dc25c53704d5
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
+/*
+ * PowerPC atomic bit operations.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first.  Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ *   |63..............0|127............64|191...........128|255...........192|
+ * and on ppc32:
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps; these work on similar bit array layouts, but
+ * byte-oriented:
+ *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields.  This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_BITOPS_H
+#define _ASM_POWERPC_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+#include <asm/synch.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
+
+#ifdef CONFIG_PPC64
+#define LARXL	"ldarx"
+#define STCXL	"stdcx."
+#define CNTLZL	"cntlzd"
+#else
+#define LARXL	"lwarx"
+#define STCXL	"stwcx."
+#define CNTLZL	"cntlzw"
+#endif
+
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
+"1:"	LARXL "	%0,0,%3		# set_bit\n"
+	"or	%0,%0,%2\n"
+	PPC405_ERR77(0,%3)
+	STCXL "	%0,0,%3\n"
+	"bne-	1b"
+	: "=&r" (old), "=m" (*p)
+	: "r" (mask), "r" (p), "m" (*p)
+	: "cc" );
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
89 | "1:" LARXL " %0,0,%3 # set_bit\n" | ||
90 | "andc %0,%0,%2\n" | ||
91 | PPC405_ERR77(0,%3) | ||
92 | STCXL " %0,0,%3\n" | ||
93 | "bne- 1b" | ||
94 | : "=&r"(old), "=m"(*p) | ||
95 | : "r"(mask), "r"(p), "m"(*p) | ||
96 | : "cc" ); | ||
97 | } | ||
98 | |||
99 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) | ||
100 | { | ||
101 | unsigned long old; | ||
102 | unsigned long mask = BITOP_MASK(nr); | ||
103 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
104 | |||
105 | __asm__ __volatile__( | ||
106 | "1:" LARXL " %0,0,%3 # set_bit\n" | ||
107 | "xor %0,%0,%2\n" | ||
108 | PPC405_ERR77(0,%3) | ||
109 | STCXL " %0,0,%3\n" | ||
110 | "bne- 1b" | ||
111 | : "=&r"(old), "=m"(*p) | ||
112 | : "r"(mask), "r"(p), "m"(*p) | ||
113 | : "cc" ); | ||
114 | } | ||
115 | |||
116 | static __inline__ int test_and_set_bit(unsigned long nr, | ||
117 | volatile unsigned long *addr) | ||
118 | { | ||
119 | unsigned long old, t; | ||
120 | unsigned long mask = BITOP_MASK(nr); | ||
121 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
122 | |||
123 | __asm__ __volatile__( | ||
124 | EIEIO_ON_SMP | ||
125 | "1:" LARXL " %0,0,%3 # test_and_set_bit\n" | ||
126 | "or %1,%0,%2 \n" | ||
127 | PPC405_ERR77(0,%3) | ||
128 | STCXL " %1,0,%3 \n" | ||
129 | "bne- 1b" | ||
130 | ISYNC_ON_SMP | ||
131 | : "=&r" (old), "=&r" (t) | ||
132 | : "r" (mask), "r" (p) | ||
133 | : "cc", "memory"); | ||
134 | |||
135 | return (old & mask) != 0; | ||
136 | } | ||
137 | |||
138 | static __inline__ int test_and_clear_bit(unsigned long nr, | ||
139 | volatile unsigned long *addr) | ||
140 | { | ||
141 | unsigned long old, t; | ||
142 | unsigned long mask = BITOP_MASK(nr); | ||
143 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
144 | |||
145 | __asm__ __volatile__( | ||
146 | EIEIO_ON_SMP | ||
147 | "1:" LARXL " %0,0,%3 # test_and_clear_bit\n" | ||
148 | "andc %1,%0,%2 \n" | ||
149 | PPC405_ERR77(0,%3) | ||
150 | STCXL " %1,0,%3 \n" | ||
151 | "bne- 1b" | ||
152 | ISYNC_ON_SMP | ||
153 | : "=&r" (old), "=&r" (t) | ||
154 | : "r" (mask), "r" (p) | ||
155 | : "cc", "memory"); | ||
156 | |||
157 | return (old & mask) != 0; | ||
158 | } | ||
159 | |||
160 | static __inline__ int test_and_change_bit(unsigned long nr, | ||
161 | volatile unsigned long *addr) | ||
162 | { | ||
163 | unsigned long old, t; | ||
164 | unsigned long mask = BITOP_MASK(nr); | ||
165 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
166 | |||
167 | __asm__ __volatile__( | ||
168 | EIEIO_ON_SMP | ||
169 | "1:" LARXL " %0,0,%3 # test_and_change_bit\n" | ||
170 | "xor %1,%0,%2 \n" | ||
171 | PPC405_ERR77(0,%3) | ||
172 | STCXL " %1,0,%3 \n" | ||
173 | "bne- 1b" | ||
174 | ISYNC_ON_SMP | ||
175 | : "=&r" (old), "=&r" (t) | ||
176 | : "r" (mask), "r" (p) | ||
177 | : "cc", "memory"); | ||
178 | |||
179 | return (old & mask) != 0; | ||
180 | } | ||
181 | |||
182 | static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | ||
183 | { | ||
184 | unsigned long old; | ||
185 | |||
186 | __asm__ __volatile__( | ||
187 | "1:" LARXL " %0,0,%3 # set_bit\n" | ||
188 | "or %0,%0,%2\n" | ||
189 | STCXL " %0,0,%3\n" | ||
190 | "bne- 1b" | ||
191 | : "=&r" (old), "=m" (*addr) | ||
192 | : "r" (mask), "r" (addr), "m" (*addr) | ||
193 | : "cc"); | ||
194 | } | ||
195 | |||
196 | /* Non-atomic versions */ | ||
197 | static __inline__ int test_bit(unsigned long nr, | ||
198 | __const__ volatile unsigned long *addr) | ||
199 | { | ||
200 | return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); | ||
201 | } | ||
202 | |||
203 | static __inline__ void __set_bit(unsigned long nr, | ||
204 | volatile unsigned long *addr) | ||
205 | { | ||
206 | unsigned long mask = BITOP_MASK(nr); | ||
207 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
208 | |||
209 | *p |= mask; | ||
210 | } | ||
211 | |||
212 | static __inline__ void __clear_bit(unsigned long nr, | ||
213 | volatile unsigned long *addr) | ||
214 | { | ||
215 | unsigned long mask = BITOP_MASK(nr); | ||
216 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
217 | |||
218 | *p &= ~mask; | ||
219 | } | ||
220 | |||
221 | static __inline__ void __change_bit(unsigned long nr, | ||
222 | volatile unsigned long *addr) | ||
223 | { | ||
224 | unsigned long mask = BITOP_MASK(nr); | ||
225 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
226 | |||
227 | *p ^= mask; | ||
228 | } | ||
229 | |||
230 | static __inline__ int __test_and_set_bit(unsigned long nr, | ||
231 | volatile unsigned long *addr) | ||
232 | { | ||
233 | unsigned long mask = BITOP_MASK(nr); | ||
234 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
235 | unsigned long old = *p; | ||
236 | |||
237 | *p = old | mask; | ||
238 | return (old & mask) != 0; | ||
239 | } | ||
240 | |||
241 | static __inline__ int __test_and_clear_bit(unsigned long nr, | ||
242 | volatile unsigned long *addr) | ||
243 | { | ||
244 | unsigned long mask = BITOP_MASK(nr); | ||
245 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
246 | unsigned long old = *p; | ||
247 | |||
248 | *p = old & ~mask; | ||
249 | return (old & mask) != 0; | ||
250 | } | ||
251 | |||
252 | static __inline__ int __test_and_change_bit(unsigned long nr, | ||
253 | volatile unsigned long *addr) | ||
254 | { | ||
255 | unsigned long mask = BITOP_MASK(nr); | ||
256 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
257 | unsigned long old = *p; | ||
258 | |||
259 | *p = old ^ mask; | ||
260 | return (old & mask) != 0; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * Return the zero-based bit position (LE, not IBM bit numbering) of | ||
265 | * the most significant 1-bit in a double word. | ||
266 | */ | ||
267 | static __inline__ int __ilog2(unsigned long x) | ||
268 | { | ||
269 | int lz; | ||
270 | |||
271 | asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x)); | ||
272 | return BITS_PER_LONG - 1 - lz; | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * Determines the bit position of the least significant 0 bit in the | ||
277 | * specified double word. The returned bit position will be | ||
278 | * zero-based, starting from the right side (63/31 - 0). | ||
279 | */ | ||
280 | static __inline__ unsigned long ffz(unsigned long x) | ||
281 | { | ||
+	/* no zero exists anywhere in the word. */
+	if ((x = ~x) == 0)
+		return BITS_PER_LONG;
+
+	/*
+	 * Calculate the bit position of the least significant '1' bit in x
+	 * (since x has been changed, this will actually be the least
+	 * significant '0' bit in the original x).  Note: (x & -x) gives us
+	 * a mask that is the least significant (RIGHT-most) 1-bit of the
+	 * value in x.
+	 */
292 | return __ilog2(x & -x); | ||
293 | } | ||
294 | |||
295 | static __inline__ int __ffs(unsigned long x) | ||
296 | { | ||
297 | return __ilog2(x & -x); | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * ffs: find first bit set. This is defined the same way as | ||
302 | * the libc and compiler builtin ffs routines, therefore | ||
303 | * differs in spirit from the above ffz (man ffs). | ||
304 | */ | ||
305 | static __inline__ int ffs(int x) | ||
306 | { | ||
307 | unsigned long i = (unsigned long)x; | ||
308 | return __ilog2(i & -i) + 1; | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * fls: find last (most-significant) bit set. | ||
313 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
314 | */ | ||
315 | static __inline__ int fls(unsigned int x) | ||
316 | { | ||
317 | int lz; | ||
318 | |||
319 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | ||
320 | return 32 - lz; | ||
321 | } | ||
322 | |||
323 | /* | ||
324 | * hweightN: returns the hamming weight (i.e. the number | ||
325 | * of bits set) of a N-bit word | ||
326 | */ | ||
327 | #define hweight64(x) generic_hweight64(x) | ||
328 | #define hweight32(x) generic_hweight32(x) | ||
329 | #define hweight16(x) generic_hweight16(x) | ||
330 | #define hweight8(x) generic_hweight8(x) | ||
331 | |||
332 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | ||
333 | unsigned long find_next_zero_bit(const unsigned long *addr, | ||
334 | unsigned long size, unsigned long offset); | ||
335 | /** | ||
336 | * find_first_bit - find the first set bit in a memory region | ||
337 | * @addr: The address to start the search at | ||
338 | * @size: The maximum size to search | ||
339 | * | ||
340 | * Returns the bit-number of the first set bit, not the number of the byte | ||
341 | * containing a bit. | ||
342 | */ | ||
343 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | ||
344 | unsigned long find_next_bit(const unsigned long *addr, | ||
345 | unsigned long size, unsigned long offset); | ||
346 | |||
347 | /* Little-endian versions */ | ||
348 | |||
349 | static __inline__ int test_le_bit(unsigned long nr, | ||
350 | __const__ unsigned long *addr) | ||
351 | { | ||
352 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; | ||
353 | return (tmp[nr >> 3] >> (nr & 7)) & 1; | ||
354 | } | ||
355 | |||
356 | #define __set_le_bit(nr, addr) \ | ||
357 | __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
358 | #define __clear_le_bit(nr, addr) \ | ||
359 | __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
360 | |||
361 | #define test_and_set_le_bit(nr, addr) \ | ||
362 | test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
363 | #define test_and_clear_le_bit(nr, addr) \ | ||
364 | test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
365 | |||
366 | #define __test_and_set_le_bit(nr, addr) \ | ||
367 | __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
368 | #define __test_and_clear_le_bit(nr, addr) \ | ||
369 | __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
370 | |||
371 | #define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0) | ||
372 | unsigned long find_next_zero_le_bit(const unsigned long *addr, | ||
373 | unsigned long size, unsigned long offset); | ||
374 | |||
375 | /* Bitmap functions for the ext2 filesystem */ | ||
376 | |||
377 | #define ext2_set_bit(nr,addr) \ | ||
378 | __test_and_set_le_bit((nr), (unsigned long*)addr) | ||
379 | #define ext2_clear_bit(nr, addr) \ | ||
380 | __test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
381 | |||
382 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
383 | test_and_set_le_bit((nr), (unsigned long*)addr) | ||
384 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
385 | test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
386 | |||
387 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
388 | |||
389 | #define ext2_find_first_zero_bit(addr, size) \ | ||
390 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
391 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
392 | find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
393 | |||
394 | /* Bitmap functions for the minix filesystem. */ | ||
395 | |||
396 | #define minix_test_and_set_bit(nr,addr) \ | ||
397 | __test_and_set_le_bit(nr, (unsigned long *)addr) | ||
398 | #define minix_set_bit(nr,addr) \ | ||
399 | __set_le_bit(nr, (unsigned long *)addr) | ||
400 | #define minix_test_and_clear_bit(nr,addr) \ | ||
401 | __test_and_clear_le_bit(nr, (unsigned long *)addr) | ||
402 | #define minix_test_bit(nr,addr) \ | ||
403 | test_le_bit(nr, (unsigned long *)addr) | ||
404 | |||
405 | #define minix_find_first_zero_bit(addr,size) \ | ||
406 | find_first_zero_le_bit((unsigned long *)addr, size) | ||
407 | |||
408 | /* | ||
409 | * Every architecture must define this function. It's the fastest | ||
410 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
411 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
412 | * bits is cleared. | ||
413 | */ | ||
414 | static inline int sched_find_first_bit(const unsigned long *b) | ||
415 | { | ||
416 | #ifdef CONFIG_PPC64 | ||
417 | if (unlikely(b[0])) | ||
418 | return __ffs(b[0]); | ||
419 | if (unlikely(b[1])) | ||
420 | return __ffs(b[1]) + 64; | ||
421 | return __ffs(b[2]) + 128; | ||
422 | #else | ||
423 | if (unlikely(b[0])) | ||
424 | return __ffs(b[0]); | ||
425 | if (unlikely(b[1])) | ||
426 | return __ffs(b[1]) + 32; | ||
427 | if (unlikely(b[2])) | ||
428 | return __ffs(b[2]) + 64; | ||
429 | if (b[3]) | ||
430 | return __ffs(b[3]) + 96; | ||
431 | return __ffs(b[4]) + 128; | ||
432 | #endif | ||
433 | } | ||
434 | |||
435 | #endif /* __KERNEL__ */ | ||
436 | |||
437 | #endif /* _ASM_POWERPC_BITOPS_H */ | ||
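
[Editorial note] The word/mask arithmetic and the little-endian swizzle described in the header comment above can be sanity-checked in plain C. A minimal user-space sketch (a hypothetical demo, not part of the commit):

```c
#include <stdio.h>

#define BITS_PER_LONG    (8 * (int)sizeof(unsigned long))
#define BITOP_MASK(nr)   (1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)   ((nr) / BITS_PER_LONG)
/* 0x38 on 64-bit, 0x18 on 32-bit, as the header comment describes */
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)

int main(void)
{
	unsigned long map[4] = { 0 };
	int nr = 70;

	/* The core of set_bit(), minus the lwarx/stwcx. atomicity. */
	map[BITOP_WORD(nr)] |= BITOP_MASK(nr);
	printf("bit %d -> word %d, mask %#lx\n",
	       nr, (int)BITOP_WORD(nr), BITOP_MASK(nr));

	/* (x & -x) isolates the lowest set bit, as ffz()/__ffs() rely on. */
	unsigned long x = 0xb0;
	printf("lowest set bit of %#lx: %#lx\n", x, x & -x);

	/* LE (byte-oriented) bit nr maps to long-based bit nr ^ swizzle. */
	for (int le = 0; le < 9; le++)
		printf("le bit %d -> bit %d\n", le, le ^ BITOP_LE_SWIZZLE);
	return 0;
}
```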
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index e4d028e87020..d625ee55f957 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -12,20 +12,16 @@
 #ifndef __ASSEMBLY__
 
 #ifdef __powerpc64__
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
-#define DATA_TYPE long long
+#define BUG_TABLE_ENTRY		".llong"
+#define BUG_TRAP_OP		"tdnei"
 #else
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".long " #label ", " #line ", " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
-#define DATA_TYPE int
+#define BUG_TABLE_ENTRY		".long"
+#define BUG_TRAP_OP		"twnei"
 #endif /* __powerpc64__ */
 
 struct bug_entry {
 	unsigned long bug_addr;
-	int line;
+	long line;
 	const char *file;
 	const char *function;
 };
@@ -43,29 +39,29 @@ struct bug_entry *find_bug(unsigned long bugaddr);
 #define BUG() do { \
 	__asm__ __volatile__( \
 		"1:	twi 31,0,0\n" \
-		".section __bug_table,\"a\"\n\t" \
-		BUG_TABLE_ENTRY(1b,%0,%1,%2) \
+		".section __bug_table,\"a\"\n" \
+		"\t"BUG_TABLE_ENTRY" 1b,%0,%1,%2\n" \
 		".previous" \
 		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define BUG_ON(x) do { \
 	__asm__ __volatile__( \
-		TRAP_OP(%0,0) \
-		".section __bug_table,\"a\"\n\t" \
-		BUG_TABLE_ENTRY(1b,%1,%2,%3) \
+		"1: "BUG_TRAP_OP" %0,0\n" \
+		".section __bug_table,\"a\"\n" \
+		"\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
 		".previous" \
-		: : "r" ((DATA_TYPE)(x)), "i" (__LINE__), \
+		: : "r" ((long)(x)), "i" (__LINE__), \
 		    "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define WARN_ON(x) do { \
 	__asm__ __volatile__( \
-		TRAP_OP(%0,0) \
-		".section __bug_table,\"a\"\n\t" \
-		BUG_TABLE_ENTRY(1b,%1,%2,%3) \
+		"1: "BUG_TRAP_OP" %0,0\n" \
+		".section __bug_table,\"a\"\n" \
+		"\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
 		".previous" \
-		: : "r" ((DATA_TYPE)(x)), \
+		: : "r" ((long)(x)), \
 		    "i" (__LINE__ + BUG_WARNING_TRAP), \
 		    "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
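
[Editorial note] For context, find_bug() (declared in the hunk above) resolves a trapping instruction's address against bug_addr entries emitted into the __bug_table section. A schematic sketch of that lookup; __start___bug_table/__stop___bug_table are assumed linker-script symbols, and module bug tables are ignored here:

```c
/* Hypothetical sketch, not a quote of the kernel's exact code. */
extern struct bug_entry __start___bug_table[], __stop___bug_table[];

static struct bug_entry *lookup_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; bug++)
		if (bug->bug_addr == bugaddr)
			return bug;
	return NULL;	/* the real lookup would also search module tables */
}
```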
diff --git a/include/asm-ppc64/futex.h b/include/asm-powerpc/futex.h
index 266b460de44e..37c94e52ab6d 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_POWERPC_FUTEX_H
+#define _ASM_POWERPC_FUTEX_H
 
 #ifdef __KERNEL__
 
@@ -7,28 +7,29 @@
 #include <asm/errno.h>
 #include <asm/synch.h>
 #include <asm/uaccess.h>
+#include <asm/ppc_asm.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
-  __asm__ __volatile (SYNC_ON_SMP \
-"1:	lwarx	%0,0,%2\n" \
-	insn \
-"2:	stwcx.	%1,0,%2\n\
-	bne-	1b\n\
-	li	%1,0\n\
-3:	.section .fixup,\"ax\"\n\
-4:	li	%1,%3\n\
-	b	3b\n\
-	.previous\n\
-	.section __ex_table,\"a\"\n\
-	.align 3\n\
-	.llong	1b,4b,2b,4b\n\
-	.previous" \
-	: "=&r" (oldval), "=&r" (ret) \
-	: "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+  __asm__ __volatile ( \
+	SYNC_ON_SMP \
+"1:	lwarx	%0,0,%2\n" \
+	insn \
+"2:	stwcx.	%1,0,%2\n" \
+	"bne-	1b\n" \
+	"li	%1,0\n" \
+"3:	.section .fixup,\"ax\"\n" \
+"4:	li	%1,%3\n" \
+	"b	3b\n" \
+	".previous\n" \
+	".section __ex_table,\"a\"\n" \
+	".align 3\n" \
+	DATAL " 1b,4b,2b,4b\n" \
+	".previous" \
+	: "=&r" (oldval), "=&r" (ret) \
+	: "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
 	: "cr0", "memory")
 
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -79,5 +80,5 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-#endif
-#endif
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_FUTEX_H */
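
[Editorial note] The hunk above shows op and cmp being unpacked from encoded_op. A user-space sketch of the conventional FUTEX_OP packing (12-bit sign-extended arguments); the oparg/cmparg extraction is not visible in this hunk, so treat it as the standard layout rather than a quote of this file:

```c
#include <stdio.h>

int main(void)
{
	/* FUTEX_OP(op=1, oparg=16, cmp=3, cmparg=7) packed into one int */
	int encoded_op = (1 << 28) | (3 << 24) | (16 << 12) | 7;

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	/* arithmetic right shift sign-extends the 12-bit fields */
	int oparg  = (int)((unsigned)encoded_op << 8) >> 20;
	int cmparg = (int)((unsigned)encoded_op << 20) >> 20;

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
```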
diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h
index 5b94ff489b8b..279a6229584b 100644
--- a/include/asm-powerpc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
@@ -62,6 +62,9 @@
 # define TIOCM_DSR	0x100
 # define TIOCM_CD	TIOCM_CAR
 # define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
 
 #define TIOCGSOFTCAR	0x5419
 #define TIOCSSOFTCAR	0x541A
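
[Editorial note] The newly added TIOCM_OUT1/OUT2/LOOP bits are read through the standard modem-control ioctls. A small user-space sketch (the device path is just an example):

```c
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <termios.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDONLY | O_NONBLOCK);
	int bits;

	if (fd < 0 || ioctl(fd, TIOCMGET, &bits) < 0) {
		perror("TIOCMGET");
		return 1;
	}
	/* Each TIOCM_* constant is a single flag in the returned mask. */
	printf("DTR=%d RTS=%d OUT2=%d LOOP=%d\n",
	       !!(bits & TIOCM_DTR), !!(bits & TIOCM_RTS),
	       !!(bits & TIOCM_OUT2), !!(bits & TIOCM_LOOP));
	close(fd);
	return 0;
}
```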
diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h
new file mode 100644
index 000000000000..71382c1ec6e3
--- /dev/null
+++ b/include/asm-powerpc/ipcbuf.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_POWERPC_IPCBUF_H
+#define _ASM_POWERPC_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for the powerpc is identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
+ * kernel.  Note extra padding because this structure is passed back
+ * and forth between kernel and user space.  Pad space is left for:
+ *	- 1 32-bit value to fill up for 8-byte alignment
+ *	- 2 miscellaneous 64-bit values
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+struct ipc64_perm
+{
+	__kernel_key_t	key;
+	__kernel_uid_t	uid;
+	__kernel_gid_t	gid;
+	__kernel_uid_t	cuid;
+	__kernel_gid_t	cgid;
+	__kernel_mode_t	mode;
+	unsigned int	seq;
+	unsigned int	__pad1;
+	__u32		__unused[4];
+};
+
+#endif /* _ASM_POWERPC_IPCBUF_H */
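
[Editorial note] The padding comment can be sanity-checked with a mirror struct. This sketch assumes the 32-bit __kernel_*_t sizes that the comment itself relies on:

```c
#include <assert.h>
#include <stdint.h>

/* Mirror of the layout above, with all __kernel_*_t assumed 32-bit. */
struct ipc64_perm_mirror {
	int32_t  key;
	uint32_t uid, gid, cuid, cgid, mode;
	uint32_t seq;
	uint32_t __pad1;	/* fills up to an 8-byte boundary */
	uint32_t __unused[4];	/* two spare 64-bit slots */
};

int main(void)
{
	/* 8 * 4 + 16 = 48 bytes, divisible by 8: the ABI stays aligned. */
	static_assert(sizeof(struct ipc64_perm_mirror) == 48, "size");
	static_assert(sizeof(struct ipc64_perm_mirror) % 8 == 0, "alignment");
	return 0;
}
```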
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index c7c3f912a3c2..b3935ea28fff 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -73,7 +73,7 @@ extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
 #define IC_INVALID    0
 #define IC_OPEN_PIC   1
 #define IC_PPC_XIC    2
-#define IC_BPA_IIC    3
+#define IC_CELL_PIC   3
 #define IC_ISERIES    4
 
 extern u64 ppc64_interrupt_controller;
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index f99f2af82ca5..c534ca41224b 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -506,6 +506,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #else
 #define __ASM_CONST(x)	x##UL
 #define ASM_CONST(x)	__ASM_CONST(x)
+
+#ifdef CONFIG_PPC64
+#define DATAL	".llong"
+#else
+#define DATAL	".long"
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index eee954a001fd..1dc4bf7b52b3 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -70,7 +70,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
 #define PLATFORM_LPAR		0x0001
 #define PLATFORM_POWERMAC	0x0400
 #define PLATFORM_MAPLE		0x0500
-#define PLATFORM_BPA		0x1000
+#define PLATFORM_CELL		0x1000
 
 /* Compatibility with drivers coming from PPC32 world */
 #define _machine (systemcfg->platform)
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 2c050332471d..d9fd7866927f 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -171,6 +171,9 @@ struct flash_block_list_header { /* just the header of flash_block_list */
 	struct flash_block_list *next;
 };
 extern struct flash_block_list_header rtas_firmware_flash_list;
+void rtas_fw_restart(char *cmd);
+void rtas_fw_power_off(void);
+void rtas_fw_halt(void);
 
 extern struct rtas_t rtas;
 
diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h
index c5b8e5358f83..7f80a019b6a0 100644
--- a/include/asm-powerpc/termios.h
+++ b/include/asm-powerpc/termios.h
@@ -94,142 +94,9 @@ struct termio {
 #define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
 #endif
 
-#define FIOCLEX		_IO('f', 1)
-#define FIONCLEX	_IO('f', 2)
-#define FIOASYNC	_IOW('f', 125, int)
-#define FIONBIO		_IOW('f', 126, int)
-#define FIONREAD	_IOR('f', 127, int)
-#define TIOCINQ		FIONREAD
-
-#define TIOCGETP	_IOR('t', 8, struct sgttyb)
-#define TIOCSETP	_IOW('t', 9, struct sgttyb)
-#define TIOCSETN	_IOW('t', 10, struct sgttyb)	/* TIOCSETP wo flush */
-
-#define TIOCSETC	_IOW('t', 17, struct tchars)
-#define TIOCGETC	_IOR('t', 18, struct tchars)
-#define TCGETS		_IOR('t', 19, struct termios)
-#define TCSETS		_IOW('t', 20, struct termios)
-#define TCSETSW		_IOW('t', 21, struct termios)
-#define TCSETSF		_IOW('t', 22, struct termios)
-
-#define TCGETA		_IOR('t', 23, struct termio)
-#define TCSETA		_IOW('t', 24, struct termio)
-#define TCSETAW		_IOW('t', 25, struct termio)
-#define TCSETAF		_IOW('t', 28, struct termio)
-
-#define TCSBRK		_IO('t', 29)
-#define TCXONC		_IO('t', 30)
-#define TCFLSH		_IO('t', 31)
-
-#define TIOCSWINSZ	_IOW('t', 103, struct winsize)
-#define TIOCGWINSZ	_IOR('t', 104, struct winsize)
-#define TIOCSTART	_IO('t', 110)		/* start output, like ^Q */
-#define TIOCSTOP	_IO('t', 111)		/* stop output, like ^S */
-#define TIOCOUTQ	_IOR('t', 115, int)	/* output queue size */
-
-#define TIOCGLTC	_IOR('t', 116, struct ltchars)
-#define TIOCSLTC	_IOW('t', 117, struct ltchars)
-#define TIOCSPGRP	_IOW('t', 118, int)
-#define TIOCGPGRP	_IOR('t', 119, int)
-
-#define TIOCEXCL	0x540C
-#define TIOCNXCL	0x540D
-#define TIOCSCTTY	0x540E
-
-#define TIOCSTI		0x5412
-#define TIOCMGET	0x5415
-#define TIOCMBIS	0x5416
-#define TIOCMBIC	0x5417
-#define TIOCMSET	0x5418
-#define TIOCGSOFTCAR	0x5419
-#define TIOCSSOFTCAR	0x541A
-#define TIOCLINUX	0x541C
-#define TIOCCONS	0x541D
-#define TIOCGSERIAL	0x541E
-#define TIOCSSERIAL	0x541F
-#define TIOCPKT		0x5420
-
-#define TIOCNOTTY	0x5422
-#define TIOCSETD	0x5423
-#define TIOCGETD	0x5424
-#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
-
-#define TIOCSERCONFIG	0x5453
-#define TIOCSERGWILD	0x5454
-#define TIOCSERSWILD	0x5455
-#define TIOCGLCKTRMIOS	0x5456
-#define TIOCSLCKTRMIOS	0x5457
-#define TIOCSERGSTRUCT	0x5458	/* For debugging only */
-#define TIOCSERGETLSR	0x5459	/* Get line status register */
-#define TIOCSERGETMULTI	0x545A	/* Get multiport config */
-#define TIOCSERSETMULTI	0x545B	/* Set multiport config */
-
-#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
-#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
-
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-
-/* modem lines */
-#define TIOCM_LE	0x001
-#define TIOCM_DTR	0x002
-#define TIOCM_RTS	0x004
-#define TIOCM_ST	0x008
-#define TIOCM_SR	0x010
-#define TIOCM_CTS	0x020
-#define TIOCM_CAR	0x040
-#define TIOCM_RNG	0x080
-#define TIOCM_DSR	0x100
-#define TIOCM_CD	TIOCM_CAR
-#define TIOCM_RI	TIOCM_RNG
-#define TIOCM_OUT1	0x2000
-#define TIOCM_OUT2	0x4000
-#define TIOCM_LOOP	0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
-
 #ifdef __KERNEL__
 
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
-	unsigned short __tmp; \
-	get_user(__tmp,&(termio)->x); \
-	(termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
-}
-
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
-	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
-	put_user((termios)->c_iflag, &(termio)->c_iflag); \
-	put_user((termios)->c_oflag, &(termio)->c_oflag); \
-	put_user((termios)->c_cflag, &(termio)->c_cflag); \
-	put_user((termios)->c_lflag, &(termio)->c_lflag); \
-	put_user((termios)->c_line,  &(termio)->c_line); \
-	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#include <asm-generic/termios.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 000000000000..33af730f0d19
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
+#ifndef _ARCH_POWERPC_UACCESS_H
+#define _ARCH_POWERPC_UACCESS_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * The fs/ds values are now the highest legal address in the "segment".
+ * This simplifies the checking in the routines below.
+ */
+
+#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
+#ifdef __powerpc64__
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#else
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current->thread.fs)
+#define set_fs(val)	(current->thread.fs = (val))
+
+#define segment_eq(a, b)	((a).seg == (b).seg)
+
+#ifdef __powerpc64__
+/*
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
+ */
+#define __access_ok(addr, size, segment)	\
+	(((addr) <= (segment).seg) && ((size) <= (segment).seg))
+
+#else
+
+#define __access_ok(addr, size, segment)	\
+	(((addr) <= (segment).seg) &&		\
+	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
+
+#endif
+
+#define access_ok(type, addr, size)		\
+	(__chk_user_ptr(addr),			\
+	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	unsigned long insn;
+	unsigned long fixup;
+};
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * The "user64" versions of the user access functions are versions that
+ * allow access of 64-bit data.  The "get_user" functions do not
+ * properly handle 64-bit data because the value gets down cast to a long.
+ * The "put_user" functions already handle 64-bit data properly but we add
+ * "user64" versions for completeness.
+ */
+#define get_user(x, ptr) \
+	__get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#ifndef __powerpc64__
+#define __get_user64(x, ptr) \
+	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user64(x, ptr) __put_user(x, ptr)
+#endif
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
+extern long __put_user_bad(void);
+
+#ifdef __powerpc64__
+#define __EX_TABLE_ALIGN	"3"
+#define __EX_TABLE_TYPE		"llong"
+#else
+#define __EX_TABLE_ALIGN	"2"
+#define __EX_TABLE_TYPE		"long"
+#endif
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op)			\
+	__asm__ __volatile__(					\
+		"1:	" op " %1,0(%2)	# put_user\n"		\
+		"2:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"3:	li %0,%3\n"				\
+		"	b 2b\n"					\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align " __EX_TABLE_ALIGN "\n"		\
+		"	."__EX_TABLE_TYPE" 1b,3b\n"		\
+		".previous"					\
+		: "=r" (err)					\
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval)				\
+	__put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2(x, addr, err)				\
+	__asm__ __volatile__(					\
+		"1:	stw %1,0(%2)\n"				\
+		"2:	stw %1+1,4(%2)\n"			\
+		"3:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"4:	li %0,%3\n"				\
+		"	b 3b\n"					\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align " __EX_TABLE_ALIGN "\n"		\
+		"	." __EX_TABLE_TYPE " 1b,4b\n"		\
+		"	." __EX_TABLE_TYPE " 2b,4b\n"		\
+		".previous"					\
+		: "=r" (err)					\
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __put_user_size(x, ptr, size, retval)			\
+do {								\
+	retval = 0;						\
+	switch (size) {						\
+	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
+	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
+	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
+	case 8: __put_user_asm2(x, ptr, retval); break;		\
+	default: __put_user_bad();				\
+	}							\
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size)			\
+({								\
+	long __pu_err;						\
+	might_sleep();						\
+	__chk_user_ptr(ptr);					\
+	__put_user_size((x), (ptr), (size), __pu_err);		\
+	__pu_err;						\
+})
+
+#define __put_user_check(x, ptr, size)				\
+({								\
+	long __pu_err = -EFAULT;				\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	might_sleep();						\
+	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
+		__put_user_size((x), __pu_addr, (size), __pu_err); \
+	__pu_err;						\
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_asm(x, addr, err, op)			\
+	__asm__ __volatile__(					\
+		"1:	"op" %1,0(%2)	# get_user\n"		\
+		"2:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"3:	li %0,%3\n"				\
+		"	li %1,0\n"				\
+		"	b 2b\n"					\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align "__EX_TABLE_ALIGN "\n"		\
+		"	." __EX_TABLE_TYPE " 1b,3b\n"		\
+		".previous"					\
+		: "=r" (err), "=r" (x)				\
+		: "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err)				\
+	__get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err)				\
+	__asm__ __volatile__(					\
+		"1:	lwz %1,0(%2)\n"				\
+		"2:	lwz %1+1,4(%2)\n"			\
+		"3:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"4:	li %0,%3\n"				\
+		"	li %1,0\n"				\
+		"	li %1+1,0\n"				\
+		"	b 3b\n"					\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align " __EX_TABLE_ALIGN "\n"		\
+		"	." __EX_TABLE_TYPE " 1b,4b\n"		\
+		"	." __EX_TABLE_TYPE " 2b,4b\n"		\
+		".previous"					\
+		: "=r" (err), "=&r" (x)				\
+		: "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __get_user_size(x, ptr, size, retval)			\
+do {								\
+	retval = 0;						\
+	__chk_user_ptr(ptr);					\
+	if (size > sizeof(x))					\
+		(x) = __get_user_bad();				\
+	switch (size) {						\
+	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
+	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
+	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
+	case 8: __get_user_asm2(x, ptr, retval); break;		\
+	default: (x) = __get_user_bad();			\
+	}							\
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size)			\
+({								\
+	long __gu_err;						\
+	unsigned long __gu_val;					\
+	__chk_user_ptr(ptr);					\
+	might_sleep();						\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+#ifndef __powerpc64__
+#define __get_user64_nocheck(x, ptr, size)			\
+({								\
+	long __gu_err;						\
+	long long __gu_val;					\
+	__chk_user_ptr(ptr);					\
+	might_sleep();						\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+#endif /* __powerpc64__ */
+
+#define __get_user_check(x, ptr, size)				\
+({								\
+	long __gu_err = -EFAULT;				\
+	unsigned long __gu_val = 0;				\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
+	might_sleep();						\
+	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
+		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+/* more complex routines */
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+		const void __user *from, unsigned long size);
+
+#ifndef __powerpc64__
+
+extern inline unsigned long copy_from_user(void *to,
+		const void __user *from, unsigned long n)
+{
+	unsigned long over;
+
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_tofrom_user((__force void __user *)to, from, n);
+	if ((unsigned long)from < TASK_SIZE) {
+		over = (unsigned long)from + n - TASK_SIZE;
+		return __copy_tofrom_user((__force void __user *)to, from,
+				n - over) + over;
+	}
+	return n;
+}
+
+extern inline unsigned long copy_to_user(void __user *to,
+		const void *from, unsigned long n)
+{
+	unsigned long over;
+
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __copy_tofrom_user(to, (__force void __user *)from, n);
+	if ((unsigned long)to < TASK_SIZE) {
+		over = (unsigned long)to + n - TASK_SIZE;
+		return __copy_tofrom_user(to, (__force void __user *)from,
+				n - over) + over;
+	}
+	return n;
+}
+
+#else /* __powerpc64__ */
+
+#define __copy_in_user(to, from, size) \
+	__copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+				    unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+				  unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+				  unsigned long n);
+
+#endif /* __powerpc64__ */
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+		const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret);
+			break;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret);
+			break;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret);
+			break;
+		case 8:
+			__get_user_size(*(u64 *)to, from, 8, ret);
+			break;
+		}
+		if (ret == 0)
+			return 0;
+	}
+	return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+		const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+			break;
+		case 2:
+			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+			break;
+		case 4:
+			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+			break;
+		case 8:
+			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+			break;
+		}
+		if (ret == 0)
+			return 0;
+	}
+	return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long __copy_from_user(void *to,
+		const void __user *from, unsigned long size)
+{
+	might_sleep();
+	return __copy_from_user_inatomic(to, from, size);
+}
+
+static inline unsigned long __copy_to_user(void __user *to,
+		const void *from, unsigned long size)
+{
+	might_sleep();
+	return __copy_to_user_inatomic(to, from, size);
+}
+
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
+{
+	might_sleep();
+	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+		return __clear_user(addr, size);
+	if ((unsigned long)addr < TASK_SIZE) {
+		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+		return __clear_user(addr, size - over) + over;
+	}
+	return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char __user *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char __user *src,
+		long count)
+{
+	might_sleep();
+	if (likely(access_ok(VERIFY_READ, src, 1)))
+		return __strncpy_from_user(dst, src, count);
+	return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+extern int __strnlen_user(const char __user *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline int strnlen_user(const char __user *str, long len)
+{
+	unsigned long top = current->thread.fs.seg;
+
+	if ((unsigned long)str > top)
+		return 0;
+	return __strnlen_user(str, len, top);
+}
+
+#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ARCH_POWERPC_UACCESS_H */
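
[Editorial note] Typical use of the interfaces defined above, as a sketch (a hypothetical handler, not code from the commit): get_user()/put_user() verify the pointer with access_ok() and return 0 or -EFAULT, while the __ variants skip the check.

```c
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical example: read a value from user space, modify it,
 * and write it back, propagating -EFAULT on bad pointers. */
static long example_handler(unsigned int __user *uarg)
{
	unsigned int val;

	if (get_user(val, uarg))	/* access_ok() + exception-guarded load */
		return -EFAULT;

	val |= 0x1;

	if (put_user(val, uarg))	/* exception-guarded store */
		return -EFAULT;

	return 0;
}
```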
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
deleted file mode 100644
index e30f536fd830..000000000000
--- a/include/asm-ppc/bitops.h
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * bitops.h: Bit string operations on the ppc
- */
-
-#ifdef __KERNEL__
-#ifndef _PPC_BITOPS_H
-#define _PPC_BITOPS_H
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-
-/*
- * The test_and_*_bit operations are taken to imply a memory barrier
- * on SMP systems.
- */
-#ifdef CONFIG_SMP
-#define SMP_WMB		"eieio\n"
-#define SMP_MB		"\nsync"
-#else
-#define SMP_WMB
-#define SMP_MB
-#endif /* CONFIG_SMP */
-
-static __inline__ void set_bit(int nr, volatile unsigned long * addr)
-{
-	unsigned long old;
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	__asm__ __volatile__("\n\
-1:	lwarx	%0,0,%3 \n\
-	or	%0,%0,%2 \n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (old), "=m" (*p)
-	: "r" (mask), "r" (p), "m" (*p)
-	: "cc" );
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	*p |= mask;
-}
-
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long old;
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	__asm__ __volatile__("\n\
-1:	lwarx	%0,0,%3 \n\
-	andc	%0,%0,%2 \n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (old), "=m" (*p)
-	: "r" (mask), "r" (p), "m" (*p)
-	: "cc");
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	*p &= ~mask;
-}
-
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long old;
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	__asm__ __volatile__("\n\
-1:	lwarx	%0,0,%3 \n\
-	xor	%0,%0,%2 \n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (old), "=m" (*p)
-	: "r" (mask), "r" (p), "m" (*p)
-	: "cc");
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	*p ^= mask;
-}
-
-/*
- * test_and_*_bit do imply a memory barrier (?)
- */
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned int old, t;
-	unsigned int mask = 1 << (nr & 0x1f);
-	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
-	__asm__ __volatile__(SMP_WMB "\n\
-1:	lwarx	%0,0,%4 \n\
-	or	%1,%0,%3 \n"
-	PPC405_ERR77(0,%4)
-"	stwcx.	%1,0,%4 \n\
-	bne	1b"
-	SMP_MB
-	: "=&r" (old), "=&r" (t), "=m" (*p)
-	: "r" (mask), "r" (p), "m" (*p)
-	: "cc", "memory");
-
-	return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-	unsigned long old = *p;
-
-	*p = old | mask;
-	return (old & mask) != 0;
-}
-
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned int old, t;
-	unsigned int mask = 1 << (nr & 0x1f);
-	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
-	__asm__ __volatile__(SMP_WMB "\n\
-1:	lwarx	%0,0,%4 \n\
-	andc	%1,%0,%3 \n"
-	PPC405_ERR77(0,%4)
-"	stwcx.	%1,0,%4 \n\
-	bne	1b"
-	SMP_MB
-	: "=&r" (old), "=&r" (t), "=m" (*p)
-	: "r" (mask), "r" (p), "m" (*p)
-	: "cc", "memory");
-
-	return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-	unsigned long old = *p;
-
-	*p = old & ~mask;
-	return (old & mask) != 0;
-}
-
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned int old, t;
-	unsigned int mask = 1 << (nr & 0x1f);
-	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
-	__asm__ __volatile__(SMP_WMB "\n\
-1:	lwarx	%0,0,%4 \n\
-	xor	%1,%0,%3 \n"
-	PPC405_ERR77(0,%4)
-"	stwcx.	%1,0,%4 \n\
-	bne	1b"
-	SMP_MB
-	: "=&r" (old), "=&r" (t), "=m" (*p)
-	: "r" (mask), "r" (p), "m" (*p)
-	: "cc", "memory");
-
-	return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1 << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-	unsigned long old = *p;
-
-	*p = old ^ mask;
-	return (old & mask) != 0;
-}
-
-static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
-{
-	return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
-}
-
-/* Return the bit position of the most significant 1 bit in a word */
-static __inline__ int __ilog2(unsigned long x)
-{
-	int lz;
-
-	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
-	return 31 - lz;
-}
-
-static __inline__ int ffz(unsigned long x)
-{
-	if ((x = ~x) == 0)
-		return 32;
-	return __ilog2(x & -x);
-}
-
-static inline int __ffs(unsigned long x)
-{
-	return __ilog2(x & -x);
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
250 | { | ||
251 | return __ilog2(x & -x) + 1; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * fls: find last (most-significant) bit set. | ||
256 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
257 | */ | ||
258 | static __inline__ int fls(unsigned int x) | ||
259 | { | ||
260 | int lz; | ||
261 | |||
262 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | ||
263 | return 32 - lz; | ||
264 | } | ||
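
[Editor's sketch: a quick host-side check of the identities the helpers above rely on. cntlzw counts leading zeros, so __ilog2(x) is 31 minus that count, and x & -x isolates the least significant set bit; __builtin_clz stands in for cntlzw here.]

    #include <stdio.h>

    static int ilog2_32(unsigned int x)
    {
            return 31 - __builtin_clz(x);   /* mirrors cntlzw */
    }

    int main(void)
    {
            unsigned int x = 0x00a0;        /* bits 5 and 7 set */

            printf("lowest set bit mask: %#x\n", x & -x);    /* 0x20 */
            printf("__ffs: %d, ffs: %d, fls: %d\n",
                   ilog2_32(x & -x),                          /* 5 */
                   ilog2_32(x & -x) + 1,                      /* 6 */
                   32 - __builtin_clz(x));                    /* 8 */
            return 0;
    }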
265 | |||
266 | /* | ||
267 | * hweightN: returns the hamming weight (i.e. the number | ||
268 | * of bits set) of a N-bit word | ||
269 | */ | ||
270 | |||
271 | #define hweight32(x) generic_hweight32(x) | ||
272 | #define hweight16(x) generic_hweight16(x) | ||
273 | #define hweight8(x) generic_hweight8(x) | ||
274 | |||
275 | /* | ||
276 | * Find the first bit set in a 140-bit bitmap. | ||
277 | * The first 100 bits are unlikely to be set. | ||
278 | */ | ||
279 | static inline int sched_find_first_bit(const unsigned long *b) | ||
280 | { | ||
281 | if (unlikely(b[0])) | ||
282 | return __ffs(b[0]); | ||
283 | if (unlikely(b[1])) | ||
284 | return __ffs(b[1]) + 32; | ||
285 | if (unlikely(b[2])) | ||
286 | return __ffs(b[2]) + 64; | ||
287 | if (b[3]) | ||
288 | return __ffs(b[3]) + 96; | ||
289 | return __ffs(b[4]) + 128; | ||
290 | } | ||
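
[Editor's sketch: on ppc32 the 140-bit priority bitmap lives in five 32-bit words, so the function probes b[0]..b[3] and falls through to b[4]. A portable stand-in using __builtin_ctzl in place of __ffs; the loop is illustrative, where the kernel version is unrolled as above.]

    #include <stdio.h>

    static int sketch_sched_find_first_bit(const unsigned long *b)
    {
            int i;

            for (i = 0; i < 4; i++)
                    if (b[i])
                            return __builtin_ctzl(b[i]) + 32 * i;
            return __builtin_ctzl(b[4]) + 128;
    }

    int main(void)
    {
            unsigned long b[5] = { 0, 0, 0, 1UL << 4, 0 };  /* bit 100 set */

            printf("%d\n", sketch_sched_find_first_bit(b)); /* prints 100 */
            return 0;
    }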
291 | |||
292 | /** | ||
293 | * find_next_bit - find the next set bit in a memory region | ||
294 | * @addr: The address to base the search on | ||
295 | * @size: The maximum size to search | ||
296 | * @offset: The bit number to start searching at | ||
297 | */ | ||
298 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
299 | unsigned long size, unsigned long offset) | ||
300 | { | ||
301 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
302 | unsigned int result = offset & ~31UL; | ||
303 | unsigned int tmp; | ||
304 | |||
305 | if (offset >= size) | ||
306 | return size; | ||
307 | size -= result; | ||
308 | offset &= 31UL; | ||
309 | if (offset) { | ||
310 | tmp = *p++; | ||
311 | tmp &= ~0UL << offset; | ||
312 | if (size < 32) | ||
313 | goto found_first; | ||
314 | if (tmp) | ||
315 | goto found_middle; | ||
316 | size -= 32; | ||
317 | result += 32; | ||
318 | } | ||
319 | while (size >= 32) { | ||
320 | if ((tmp = *p++) != 0) | ||
321 | goto found_middle; | ||
322 | result += 32; | ||
323 | size -= 32; | ||
324 | } | ||
325 | if (!size) | ||
326 | return result; | ||
327 | tmp = *p; | ||
328 | |||
329 | found_first: | ||
330 | tmp &= ~0UL >> (32 - size); | ||
331 | if (tmp == 0UL) /* Are any bits set? */ | ||
332 | return result + size; /* Nope. */ | ||
333 | found_middle: | ||
334 | return result + __ffs(tmp); | ||
335 | } | ||
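
[Editor's sketch: the function above scans a possibly partial leading word, then whole words, then a partial trailing word, returning size when no set bit remains. The naive bit-at-a-time stand-in below demonstrates only that calling contract, not the word-at-a-time algorithm.]

    #include <stdio.h>

    static unsigned long next_bit(const unsigned int *map, unsigned long size,
                                  unsigned long offset)
    {
            while (offset < size) {
                    if (map[offset >> 5] & (1U << (offset & 31)))
                            return offset;
                    offset++;
            }
            return size;    /* no set bit at or after offset */
    }

    int main(void)
    {
            unsigned int map[2] = { 0x00000011, 0x80000000 }; /* bits 0, 4, 63 */
            unsigned long bit;

            for (bit = next_bit(map, 64, 0); bit < 64;
                 bit = next_bit(map, 64, bit + 1))
                    printf("bit %lu set\n", bit);
            return 0;
    }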
336 | |||
337 | /** | ||
338 | * find_first_bit - find the first set bit in a memory region | ||
339 | * @addr: The address to start the search at | ||
340 | * @size: The maximum size to search | ||
341 | * | ||
342 | * Returns the bit-number of the first set bit, not the number of the byte | ||
343 | * containing a bit. | ||
344 | */ | ||
345 | #define find_first_bit(addr, size) \ | ||
346 | find_next_bit((addr), (size), 0) | ||
347 | |||
348 | /* | ||
349 | * This implementation of find_{first,next}_zero_bit was stolen from | ||
350 | * Linus' asm-alpha/bitops.h. | ||
351 | */ | ||
352 | #define find_first_zero_bit(addr, size) \ | ||
353 | find_next_zero_bit((addr), (size), 0) | ||
354 | |||
355 | static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr, | ||
356 | unsigned long size, unsigned long offset) | ||
357 | { | ||
358 | unsigned int * p = ((unsigned int *) addr) + (offset >> 5); | ||
359 | unsigned int result = offset & ~31UL; | ||
360 | unsigned int tmp; | ||
361 | |||
362 | if (offset >= size) | ||
363 | return size; | ||
364 | size -= result; | ||
365 | offset &= 31UL; | ||
366 | if (offset) { | ||
367 | tmp = *p++; | ||
368 | tmp |= ~0UL >> (32-offset); | ||
369 | if (size < 32) | ||
370 | goto found_first; | ||
371 | if (tmp != ~0U) | ||
372 | goto found_middle; | ||
373 | size -= 32; | ||
374 | result += 32; | ||
375 | } | ||
376 | while (size >= 32) { | ||
377 | if ((tmp = *p++) != ~0U) | ||
378 | goto found_middle; | ||
379 | result += 32; | ||
380 | size -= 32; | ||
381 | } | ||
382 | if (!size) | ||
383 | return result; | ||
384 | tmp = *p; | ||
385 | found_first: | ||
386 | tmp |= ~0UL << size; | ||
387 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
388 | return result + size; /* Nope. */ | ||
389 | found_middle: | ||
390 | return result + ffz(tmp); | ||
391 | } | ||
392 | |||
393 | |||
394 | #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
395 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
396 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
397 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
398 | |||
399 | static __inline__ int ext2_test_bit(int nr, __const__ void * addr) | ||
400 | { | ||
401 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
402 | |||
403 | return (ADDR[nr >> 3] >> (nr & 7)) & 1; | ||
404 | } | ||
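
[Editor's note: the nr ^ 0x18 in the ext2 macros above flips bits 3 and 4 of the bit number, i.e. it reverses the byte index within a 32-bit word, mapping byte-oriented little-endian bit numbers onto the native big-endian word layout. A small demonstration:]

    #include <stdio.h>

    int main(void)
    {
            int nr;

            /* byte-oriented LE bit number -> native 32-bit BE bit number */
            for (nr = 0; nr < 32; nr += 8)
                    printf("LE bit %2d -> native bit %2d\n", nr, nr ^ 0x18);
            return 0;
    }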
405 | |||
406 | /* | ||
407 | * This implementation of ext2_find_{first,next}_zero_bit was stolen from | ||
408 | * Linus' asm-alpha/bitops.h and modified for a big-endian machine. | ||
409 | */ | ||
410 | |||
411 | #define ext2_find_first_zero_bit(addr, size) \ | ||
412 | ext2_find_next_zero_bit((addr), (size), 0) | ||
413 | |||
414 | static __inline__ unsigned long ext2_find_next_zero_bit(const void *addr, | ||
415 | unsigned long size, unsigned long offset) | ||
416 | { | ||
417 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
418 | unsigned int result = offset & ~31UL; | ||
419 | unsigned int tmp; | ||
420 | |||
421 | if (offset >= size) | ||
422 | return size; | ||
423 | size -= result; | ||
424 | offset &= 31UL; | ||
425 | if (offset) { | ||
426 | tmp = cpu_to_le32p(p++); | ||
427 | tmp |= ~0UL >> (32-offset); | ||
428 | if (size < 32) | ||
429 | goto found_first; | ||
430 | if (tmp != ~0U) | ||
431 | goto found_middle; | ||
432 | size -= 32; | ||
433 | result += 32; | ||
434 | } | ||
435 | while (size >= 32) { | ||
436 | if ((tmp = cpu_to_le32p(p++)) != ~0U) | ||
437 | goto found_middle; | ||
438 | result += 32; | ||
439 | size -= 32; | ||
440 | } | ||
441 | if (!size) | ||
442 | return result; | ||
443 | tmp = cpu_to_le32p(p); | ||
444 | found_first: | ||
445 | tmp |= ~0U << size; | ||
446 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
447 | return result + size; /* Nope. */ | ||
448 | found_middle: | ||
449 | return result + ffz(tmp); | ||
450 | } | ||
451 | |||
452 | /* Bitmap functions for the minix filesystem. */ | ||
453 | #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
454 | #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr)) | ||
455 | #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) | ||
456 | #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) | ||
457 | #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) | ||
458 | |||
459 | #endif /* _PPC_BITOPS_H */ | ||
460 | #endif /* __KERNEL__ */ | ||
diff --git a/include/asm-ppc/futex.h b/include/asm-ppc/futex.h deleted file mode 100644 index 9feff4ce1424..000000000000 --- a/include/asm-ppc/futex.h +++ /dev/null | |||
@@ -1,53 +0,0 @@ | |||
1 | #ifndef _ASM_FUTEX_H | ||
2 | #define _ASM_FUTEX_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/futex.h> | ||
7 | #include <asm/errno.h> | ||
8 | #include <asm/uaccess.h> | ||
9 | |||
10 | static inline int | ||
11 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | ||
12 | { | ||
13 | int op = (encoded_op >> 28) & 7; | ||
14 | int cmp = (encoded_op >> 24) & 15; | ||
15 | int oparg = (encoded_op << 8) >> 20; | ||
16 | int cmparg = (encoded_op << 20) >> 20; | ||
17 | int oldval = 0, ret; | ||
18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
19 | oparg = 1 << oparg; | ||
20 | |||
21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | ||
22 | return -EFAULT; | ||
23 | |||
24 | inc_preempt_count(); | ||
25 | |||
26 | switch (op) { | ||
27 | case FUTEX_OP_SET: | ||
28 | case FUTEX_OP_ADD: | ||
29 | case FUTEX_OP_OR: | ||
30 | case FUTEX_OP_ANDN: | ||
31 | case FUTEX_OP_XOR: | ||
32 | default: | ||
33 | ret = -ENOSYS; | ||
34 | } | ||
35 | |||
36 | dec_preempt_count(); | ||
37 | |||
38 | if (!ret) { | ||
39 | switch (cmp) { | ||
40 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | ||
41 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | ||
42 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | ||
43 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | ||
44 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | ||
45 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | ||
46 | default: ret = -ENOSYS; | ||
47 | } | ||
48 | } | ||
49 | return ret; | ||
50 | } | ||
51 | |||
52 | #endif | ||
53 | #endif | ||
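
[Editor's sketch: encoded_op packs a 4-bit op, a 4-bit cmp, and two 12-bit sign-extended arguments; the (x << a) >> b idiom in the deleted routine above sign-extends them. The decode below assumes arithmetic right shift (and GCC's defined overflow on left shift), as the kernel does; the sample value is made up.]

    #include <stdio.h>

    int main(void)
    {
            int encoded_op = (1 << 28) | (3 << 24) | (0x00a << 12) | 0xfff;
            int op = (encoded_op >> 28) & 7;
            int cmp = (encoded_op >> 24) & 15;
            int oparg = (encoded_op << 8) >> 20;   /* bits 23..12, sign-extended */
            int cmparg = (encoded_op << 20) >> 20; /* bits 11..0, sign-extended */

            /* prints: op=1 cmp=3 oparg=10 cmparg=-1 */
            printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
            return 0;
    }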
diff --git a/include/asm-ppc/ipcbuf.h b/include/asm-ppc/ipcbuf.h deleted file mode 100644 index fab6752c7480..000000000000 --- a/include/asm-ppc/ipcbuf.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | #ifndef __PPC_IPCBUF_H__ | ||
2 | #define __PPC_IPCBUF_H__ | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for PPC architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 1 32-bit value to fill up for 8-byte alignment | ||
11 | * - 2 miscellaneous 64-bit values (so that this structure matches | ||
12 | * PPC64 ipc64_perm) | ||
13 | */ | ||
14 | |||
15 | struct ipc64_perm | ||
16 | { | ||
17 | __kernel_key_t key; | ||
18 | __kernel_uid_t uid; | ||
19 | __kernel_gid_t gid; | ||
20 | __kernel_uid_t cuid; | ||
21 | __kernel_gid_t cgid; | ||
22 | __kernel_mode_t mode; | ||
23 | unsigned long seq; | ||
24 | unsigned int __pad2; | ||
25 | unsigned long long __unused1; | ||
26 | unsigned long long __unused2; | ||
27 | }; | ||
28 | |||
29 | #endif /* __PPC_IPCBUF_H__ */ | ||
diff --git a/include/asm-ppc/uaccess.h b/include/asm-ppc/uaccess.h deleted file mode 100644 index 63f56224da8c..000000000000 --- a/include/asm-ppc/uaccess.h +++ /dev/null | |||
@@ -1,393 +0,0 @@ | |||
1 | #ifdef __KERNEL__ | ||
2 | #ifndef _PPC_UACCESS_H | ||
3 | #define _PPC_UACCESS_H | ||
4 | |||
5 | #ifndef __ASSEMBLY__ | ||
6 | #include <linux/sched.h> | ||
7 | #include <linux/errno.h> | ||
8 | #include <asm/processor.h> | ||
9 | |||
10 | #define VERIFY_READ 0 | ||
11 | #define VERIFY_WRITE 1 | ||
12 | |||
13 | /* | ||
14 | * The fs value determines whether argument validity checking should be | ||
15 | * performed or not. If get_fs() == USER_DS, checking is performed; with | ||
16 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
17 | * | ||
18 | * For historical reasons, these macros are grossly misnamed. | ||
19 | * | ||
20 | * The fs/ds values are now the highest legal address in the "segment". | ||
21 | * This simplifies the checking in the routines below. | ||
22 | */ | ||
23 | |||
24 | #define KERNEL_DS ((mm_segment_t) { ~0UL }) | ||
25 | #define USER_DS ((mm_segment_t) { TASK_SIZE - 1 }) | ||
26 | |||
27 | #define get_ds() (KERNEL_DS) | ||
28 | #define get_fs() (current->thread.fs) | ||
29 | #define set_fs(val) (current->thread.fs = (val)) | ||
30 | |||
31 | #define segment_eq(a,b) ((a).seg == (b).seg) | ||
32 | |||
33 | #define __access_ok(addr,size) \ | ||
34 | ((addr) <= current->thread.fs.seg \ | ||
35 | && ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr))) | ||
36 | |||
37 | #define access_ok(type, addr, size) \ | ||
38 | (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size))) | ||
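
[Editor's sketch: __access_ok avoids integer overflow at the top of the address space by testing size - 1 <= seg - addr instead of addr + size <= seg. Host-side illustration, with seg standing in for current->thread.fs.seg (TASK_SIZE - 1):]

    #include <stdio.h>

    static int sketch_access_ok(unsigned long addr, unsigned long size,
                                unsigned long seg)
    {
            return addr <= seg && (size == 0 || size - 1 <= seg - addr);
    }

    int main(void)
    {
            unsigned long seg = 0x7fffffffUL;   /* e.g. TASK_SIZE - 1 */

            printf("%d\n", sketch_access_ok(0x10000000UL, 0x1000, seg)); /* 1 */
            printf("%d\n", sketch_access_ok(0x7ffff000UL, 0x2000, seg)); /* 0: runs past seg */
            printf("%d\n", sketch_access_ok(0xffffffffUL, 1, seg));      /* 0: above seg */
            return 0;
    }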
39 | |||
40 | /* | ||
41 | * The exception table consists of pairs of addresses: the first is the | ||
42 | * address of an instruction that is allowed to fault, and the second is | ||
43 | * the address at which the program should continue. No registers are | ||
44 | * modified, so it is entirely up to the continuation code to figure out | ||
45 | * what to do. | ||
46 | * | ||
47 | * All the routines below use bits of fixup code that are out of line | ||
48 | * with the main instruction path. This means when everything is well, | ||
49 | * we don't even have to jump over them. Further, they do not intrude | ||
50 | * on our cache or tlb entries. | ||
51 | */ | ||
52 | |||
53 | struct exception_table_entry | ||
54 | { | ||
55 | unsigned long insn, fixup; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * These are the main single-value transfer routines. They automatically | ||
60 | * use the right size if we just have the right pointer type. | ||
61 | * | ||
62 | * This gets kind of ugly. We want to return _two_ values in "get_user()" | ||
63 | * and yet we don't want to do any pointers, because that is too much | ||
64 | * of a performance impact. Thus we have a few rather ugly macros here, | ||
65 | * and hide all the ugliness from the user. | ||
66 | * | ||
67 | * The "__xxx" versions of the user access functions are versions that | ||
68 | * do not verify the address space, that must have been done previously | ||
69 | * with a separate "access_ok()" call (this is used when we do multiple | ||
70 | * accesses to the same area of user memory). | ||
71 | * | ||
72 | * As we use the same address space for kernel and user data on the | ||
73 | * PowerPC, we can just do these as direct assignments. (Of course, the | ||
74 | * exception handling means that it's no longer "just"...) | ||
75 | * | ||
76 | * The "user64" versions of the user access functions are versions that | ||
77 | * allow access of 64-bit data. The "get_user" functions do not | ||
78 | * properly handle 64-bit data because the value gets downcast to a long. | ||
79 | * The "put_user" functions already handle 64-bit data properly, but we add | ||
80 | * "user64" versions for completeness. | ||
81 | */ | ||
82 | #define get_user(x,ptr) \ | ||
83 | __get_user_check((x),(ptr),sizeof(*(ptr))) | ||
84 | #define get_user64(x,ptr) \ | ||
85 | __get_user64_check((x),(ptr),sizeof(*(ptr))) | ||
86 | #define put_user(x,ptr) \ | ||
87 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
88 | #define put_user64(x,ptr) put_user(x,ptr) | ||
89 | |||
90 | #define __get_user(x,ptr) \ | ||
91 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | ||
92 | #define __get_user64(x,ptr) \ | ||
93 | __get_user64_nocheck((x),(ptr),sizeof(*(ptr))) | ||
94 | #define __put_user(x,ptr) \ | ||
95 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
96 | #define __put_user64(x,ptr) __put_user(x,ptr) | ||
97 | |||
98 | extern long __put_user_bad(void); | ||
99 | |||
100 | #define __put_user_nocheck(x,ptr,size) \ | ||
101 | ({ \ | ||
102 | long __pu_err; \ | ||
103 | __chk_user_ptr(ptr); \ | ||
104 | __put_user_size((x),(ptr),(size),__pu_err); \ | ||
105 | __pu_err; \ | ||
106 | }) | ||
107 | |||
108 | #define __put_user_check(x,ptr,size) \ | ||
109 | ({ \ | ||
110 | long __pu_err = -EFAULT; \ | ||
111 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
112 | if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ | ||
113 | __put_user_size((x),__pu_addr,(size),__pu_err); \ | ||
114 | __pu_err; \ | ||
115 | }) | ||
116 | |||
117 | #define __put_user_size(x,ptr,size,retval) \ | ||
118 | do { \ | ||
119 | retval = 0; \ | ||
120 | switch (size) { \ | ||
121 | case 1: \ | ||
122 | __put_user_asm(x, ptr, retval, "stb"); \ | ||
123 | break; \ | ||
124 | case 2: \ | ||
125 | __put_user_asm(x, ptr, retval, "sth"); \ | ||
126 | break; \ | ||
127 | case 4: \ | ||
128 | __put_user_asm(x, ptr, retval, "stw"); \ | ||
129 | break; \ | ||
130 | case 8: \ | ||
131 | __put_user_asm2(x, ptr, retval); \ | ||
132 | break; \ | ||
133 | default: \ | ||
134 | __put_user_bad(); \ | ||
135 | } \ | ||
136 | } while (0) | ||
137 | |||
138 | /* | ||
139 | * We don't tell gcc that we are accessing memory, but this is OK | ||
140 | * because we do not write to any memory gcc knows about, so there | ||
141 | * are no aliasing issues. | ||
142 | */ | ||
143 | #define __put_user_asm(x, addr, err, op) \ | ||
144 | __asm__ __volatile__( \ | ||
145 | "1: "op" %1,0(%2)\n" \ | ||
146 | "2:\n" \ | ||
147 | ".section .fixup,\"ax\"\n" \ | ||
148 | "3: li %0,%3\n" \ | ||
149 | " b 2b\n" \ | ||
150 | ".previous\n" \ | ||
151 | ".section __ex_table,\"a\"\n" \ | ||
152 | " .align 2\n" \ | ||
153 | " .long 1b,3b\n" \ | ||
154 | ".previous" \ | ||
155 | : "=r" (err) \ | ||
156 | : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) | ||
157 | |||
158 | #define __put_user_asm2(x, addr, err) \ | ||
159 | __asm__ __volatile__( \ | ||
160 | "1: stw %1,0(%2)\n" \ | ||
161 | "2: stw %1+1,4(%2)\n" \ | ||
162 | "3:\n" \ | ||
163 | ".section .fixup,\"ax\"\n" \ | ||
164 | "4: li %0,%3\n" \ | ||
165 | " b 3b\n" \ | ||
166 | ".previous\n" \ | ||
167 | ".section __ex_table,\"a\"\n" \ | ||
168 | " .align 2\n" \ | ||
169 | " .long 1b,4b\n" \ | ||
170 | " .long 2b,4b\n" \ | ||
171 | ".previous" \ | ||
172 | : "=r" (err) \ | ||
173 | : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) | ||
174 | |||
175 | #define __get_user_nocheck(x, ptr, size) \ | ||
176 | ({ \ | ||
177 | long __gu_err; \ | ||
178 | unsigned long __gu_val; \ | ||
179 | __chk_user_ptr(ptr); \ | ||
180 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ | ||
181 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
182 | __gu_err; \ | ||
183 | }) | ||
184 | |||
185 | #define __get_user64_nocheck(x, ptr, size) \ | ||
186 | ({ \ | ||
187 | long __gu_err; \ | ||
188 | long long __gu_val; \ | ||
189 | __chk_user_ptr(ptr); \ | ||
190 | __get_user_size64(__gu_val, (ptr), (size), __gu_err); \ | ||
191 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
192 | __gu_err; \ | ||
193 | }) | ||
194 | |||
195 | #define __get_user_check(x, ptr, size) \ | ||
196 | ({ \ | ||
197 | long __gu_err = -EFAULT; \ | ||
198 | unsigned long __gu_val = 0; \ | ||
199 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
200 | if (access_ok(VERIFY_READ, __gu_addr, (size))) \ | ||
201 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | ||
202 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
203 | __gu_err; \ | ||
204 | }) | ||
205 | |||
206 | #define __get_user64_check(x, ptr, size) \ | ||
207 | ({ \ | ||
208 | long __gu_err = -EFAULT; \ | ||
209 | long long __gu_val = 0; \ | ||
210 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
211 | if (access_ok(VERIFY_READ, __gu_addr, (size))) \ | ||
212 | __get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \ | ||
213 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
214 | __gu_err; \ | ||
215 | }) | ||
216 | |||
217 | extern long __get_user_bad(void); | ||
218 | |||
219 | #define __get_user_size(x, ptr, size, retval) \ | ||
220 | do { \ | ||
221 | retval = 0; \ | ||
222 | switch (size) { \ | ||
223 | case 1: \ | ||
224 | __get_user_asm(x, ptr, retval, "lbz"); \ | ||
225 | break; \ | ||
226 | case 2: \ | ||
227 | __get_user_asm(x, ptr, retval, "lhz"); \ | ||
228 | break; \ | ||
229 | case 4: \ | ||
230 | __get_user_asm(x, ptr, retval, "lwz"); \ | ||
231 | break; \ | ||
232 | default: \ | ||
233 | x = __get_user_bad(); \ | ||
234 | } \ | ||
235 | } while (0) | ||
236 | |||
237 | #define __get_user_size64(x, ptr, size, retval) \ | ||
238 | do { \ | ||
239 | retval = 0; \ | ||
240 | switch (size) { \ | ||
241 | case 1: \ | ||
242 | __get_user_asm(x, ptr, retval, "lbz"); \ | ||
243 | break; \ | ||
244 | case 2: \ | ||
245 | __get_user_asm(x, ptr, retval, "lhz"); \ | ||
246 | break; \ | ||
247 | case 4: \ | ||
248 | __get_user_asm(x, ptr, retval, "lwz"); \ | ||
249 | break; \ | ||
250 | case 8: \ | ||
251 | __get_user_asm2(x, ptr, retval); \ | ||
252 | break; \ | ||
253 | default: \ | ||
254 | x = __get_user_bad(); \ | ||
255 | } \ | ||
256 | } while (0) | ||
257 | |||
258 | #define __get_user_asm(x, addr, err, op) \ | ||
259 | __asm__ __volatile__( \ | ||
260 | "1: "op" %1,0(%2)\n" \ | ||
261 | "2:\n" \ | ||
262 | ".section .fixup,\"ax\"\n" \ | ||
263 | "3: li %0,%3\n" \ | ||
264 | " li %1,0\n" \ | ||
265 | " b 2b\n" \ | ||
266 | ".previous\n" \ | ||
267 | ".section __ex_table,\"a\"\n" \ | ||
268 | " .align 2\n" \ | ||
269 | " .long 1b,3b\n" \ | ||
270 | ".previous" \ | ||
271 | : "=r"(err), "=r"(x) \ | ||
272 | : "b"(addr), "i"(-EFAULT), "0"(err)) | ||
273 | |||
274 | #define __get_user_asm2(x, addr, err) \ | ||
275 | __asm__ __volatile__( \ | ||
276 | "1: lwz %1,0(%2)\n" \ | ||
277 | "2: lwz %1+1,4(%2)\n" \ | ||
278 | "3:\n" \ | ||
279 | ".section .fixup,\"ax\"\n" \ | ||
280 | "4: li %0,%3\n" \ | ||
281 | " li %1,0\n" \ | ||
282 | " li %1+1,0\n" \ | ||
283 | " b 3b\n" \ | ||
284 | ".previous\n" \ | ||
285 | ".section __ex_table,\"a\"\n" \ | ||
286 | " .align 2\n" \ | ||
287 | " .long 1b,4b\n" \ | ||
288 | " .long 2b,4b\n" \ | ||
289 | ".previous" \ | ||
290 | : "=r"(err), "=&r"(x) \ | ||
291 | : "b"(addr), "i"(-EFAULT), "0"(err)) | ||
292 | |||
293 | /* more complex routines */ | ||
294 | |||
295 | extern int __copy_tofrom_user(void __user *to, const void __user *from, | ||
296 | unsigned long size); | ||
297 | |||
298 | extern inline unsigned long | ||
299 | copy_from_user(void *to, const void __user *from, unsigned long n) | ||
300 | { | ||
301 | unsigned long over; | ||
302 | |||
303 | if (access_ok(VERIFY_READ, from, n)) | ||
304 | return __copy_tofrom_user((__force void __user *)to, from, n); | ||
305 | if ((unsigned long)from < TASK_SIZE) { | ||
306 | over = (unsigned long)from + n - TASK_SIZE; | ||
307 | return __copy_tofrom_user((__force void __user *)to, from, n - over) + over; | ||
308 | } | ||
309 | return n; | ||
310 | } | ||
311 | |||
312 | extern inline unsigned long | ||
313 | copy_to_user(void __user *to, const void *from, unsigned long n) | ||
314 | { | ||
315 | unsigned long over; | ||
316 | |||
317 | if (access_ok(VERIFY_WRITE, to, n)) | ||
318 | return __copy_tofrom_user(to, (__force void __user *) from, n); | ||
319 | if ((unsigned long)to < TASK_SIZE) { | ||
320 | over = (unsigned long)to + n - TASK_SIZE; | ||
321 | return __copy_tofrom_user(to, (__force void __user *) from, n - over) + over; | ||
322 | } | ||
323 | return n; | ||
324 | } | ||
325 | |||
326 | static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long size) | ||
327 | { | ||
328 | return __copy_tofrom_user((__force void __user *)to, from, size); | ||
329 | } | ||
330 | |||
331 | static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long size) | ||
332 | { | ||
333 | return __copy_tofrom_user(to, (__force void __user *)from, size); | ||
334 | } | ||
335 | |||
336 | #define __copy_to_user_inatomic __copy_to_user | ||
337 | #define __copy_from_user_inatomic __copy_from_user | ||
338 | |||
339 | extern unsigned long __clear_user(void __user *addr, unsigned long size); | ||
340 | |||
341 | extern inline unsigned long | ||
342 | clear_user(void __user *addr, unsigned long size) | ||
343 | { | ||
344 | if (access_ok(VERIFY_WRITE, addr, size)) | ||
345 | return __clear_user(addr, size); | ||
346 | if ((unsigned long)addr < TASK_SIZE) { | ||
347 | unsigned long over = (unsigned long)addr + size - TASK_SIZE; | ||
348 | return __clear_user(addr, size - over) + over; | ||
349 | } | ||
350 | return size; | ||
351 | } | ||
352 | |||
353 | extern int __strncpy_from_user(char *dst, const char __user *src, long count); | ||
354 | |||
355 | extern inline long | ||
356 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
357 | { | ||
358 | if (access_ok(VERIFY_READ, src, 1)) | ||
359 | return __strncpy_from_user(dst, src, count); | ||
360 | return -EFAULT; | ||
361 | } | ||
362 | |||
363 | /* | ||
364 | * Return the size of a string (including the ending 0) | ||
365 | * | ||
366 | * Return 0 for error | ||
367 | */ | ||
368 | |||
369 | extern int __strnlen_user(const char __user *str, long len, unsigned long top); | ||
370 | |||
371 | /* | ||
372 | * Returns the length of the string at str (including the null byte), | ||
373 | * or 0 if we hit a page we can't access, | ||
374 | * or something > len if we didn't find a null byte. | ||
375 | * | ||
376 | * The `top' parameter to __strnlen_user is to make sure that | ||
377 | * we can never overflow from the user area into kernel space. | ||
378 | */ | ||
379 | extern __inline__ int strnlen_user(const char __user *str, long len) | ||
380 | { | ||
381 | unsigned long top = current->thread.fs.seg; | ||
382 | |||
383 | if ((unsigned long)str > top) | ||
384 | return 0; | ||
385 | return __strnlen_user(str, len, top); | ||
386 | } | ||
387 | |||
388 | #define strlen_user(str) strnlen_user((str), 0x7ffffffe) | ||
389 | |||
390 | #endif /* __ASSEMBLY__ */ | ||
391 | |||
392 | #endif /* _PPC_UACCESS_H */ | ||
393 | #endif /* __KERNEL__ */ | ||
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h deleted file mode 100644 index dbfa42ef4a99..000000000000 --- a/include/asm-ppc64/bitops.h +++ /dev/null | |||
@@ -1,360 +0,0 @@ | |||
1 | /* | ||
2 | * PowerPC64 atomic bit operations. | ||
3 | * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner, | ||
4 | * Anton Blanchard | ||
5 | * | ||
6 | * Originally taken from the 32b PPC code. Modified to use 64b values for | ||
7 | * the various counters & memory references. | ||
8 | * | ||
9 | * Bitops are odd when viewed on big-endian systems. They were designed | ||
10 | * for little endian, where the size of the bitset doesn't matter (low-order | ||
11 | * bytes come first) as long as the bit in question is valid. | ||
12 | * | ||
13 | * Bits are "tested" often using the C expression (val & (1<<nr)) so we do | ||
14 | * our best to stay compatible with that. The assumption is that val will | ||
15 | * be unsigned long for such tests. As such, we assume the bits are stored | ||
16 | * as an array of unsigned long (the usual case is a single unsigned long, | ||
17 | * of course). Here's an example bitset with bit numbering: | ||
18 | * | ||
19 | * |63..........0|127........64|191.......128|255.......192| | ||
20 | * | ||
21 | * This leads to a problem. If an int, short or char is passed as a bitset | ||
22 | * it will be a bad memory reference since we want to store in chunks | ||
23 | * of unsigned long (64 bits here) size. | ||
24 | * | ||
25 | * There are a few little-endian macros used mostly for filesystem bitmaps, | ||
26 | * these work on similar bit arrays layouts, but byte-oriented: | ||
27 | * | ||
28 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| | ||
29 | * | ||
30 | * The main difference is that bit 3-5 in the bit number field needs to be | ||
31 | * reversed compared to the big-endian bit fields. This can be achieved | ||
32 | * by XOR with 0b111000 (0x38). | ||
33 | * | ||
34 | * This program is free software; you can redistribute it and/or | ||
35 | * modify it under the terms of the GNU General Public License | ||
36 | * as published by the Free Software Foundation; either version | ||
37 | * 2 of the License, or (at your option) any later version. | ||
38 | */ | ||
39 | |||
40 | #ifndef _PPC64_BITOPS_H | ||
41 | #define _PPC64_BITOPS_H | ||
42 | |||
43 | #ifdef __KERNEL__ | ||
44 | |||
45 | #include <asm/synch.h> | ||
46 | |||
47 | /* | ||
48 | * clear_bit doesn't imply a memory barrier | ||
49 | */ | ||
50 | #define smp_mb__before_clear_bit() smp_mb() | ||
51 | #define smp_mb__after_clear_bit() smp_mb() | ||
52 | |||
53 | static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr) | ||
54 | { | ||
55 | return (1UL & (addr[nr >> 6] >> (nr & 63))); | ||
56 | } | ||
57 | |||
58 | static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr) | ||
59 | { | ||
60 | unsigned long old; | ||
61 | unsigned long mask = 1UL << (nr & 0x3f); | ||
62 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
63 | |||
64 | __asm__ __volatile__( | ||
65 | "1: ldarx %0,0,%3 # set_bit\n\ | ||
66 | or %0,%0,%2\n\ | ||
67 | stdcx. %0,0,%3\n\ | ||
68 | bne- 1b" | ||
69 | : "=&r" (old), "=m" (*p) | ||
70 | : "r" (mask), "r" (p), "m" (*p) | ||
71 | : "cc"); | ||
72 | } | ||
73 | |||
74 | static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
75 | { | ||
76 | unsigned long old; | ||
77 | unsigned long mask = 1UL << (nr & 0x3f); | ||
78 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
79 | |||
80 | __asm__ __volatile__( | ||
81 | "1: ldarx %0,0,%3 # clear_bit\n\ | ||
82 | andc %0,%0,%2\n\ | ||
83 | stdcx. %0,0,%3\n\ | ||
84 | bne- 1b" | ||
85 | : "=&r" (old), "=m" (*p) | ||
86 | : "r" (mask), "r" (p), "m" (*p) | ||
87 | : "cc"); | ||
88 | } | ||
89 | |||
90 | static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr) | ||
91 | { | ||
92 | unsigned long old; | ||
93 | unsigned long mask = 1UL << (nr & 0x3f); | ||
94 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
95 | |||
96 | __asm__ __volatile__( | ||
97 | "1: ldarx %0,0,%3 # change_bit\n\ | ||
98 | xor %0,%0,%2\n\ | ||
99 | stdcx. %0,0,%3\n\ | ||
100 | bne- 1b" | ||
101 | : "=&r" (old), "=m" (*p) | ||
102 | : "r" (mask), "r" (p), "m" (*p) | ||
103 | : "cc"); | ||
104 | } | ||
105 | |||
106 | static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | ||
107 | { | ||
108 | unsigned long old, t; | ||
109 | unsigned long mask = 1UL << (nr & 0x3f); | ||
110 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
111 | |||
112 | __asm__ __volatile__( | ||
113 | EIEIO_ON_SMP | ||
114 | "1: ldarx %0,0,%3 # test_and_set_bit\n\ | ||
115 | or %1,%0,%2 \n\ | ||
116 | stdcx. %1,0,%3 \n\ | ||
117 | bne- 1b" | ||
118 | ISYNC_ON_SMP | ||
119 | : "=&r" (old), "=&r" (t) | ||
120 | : "r" (mask), "r" (p) | ||
121 | : "cc", "memory"); | ||
122 | |||
123 | return (old & mask) != 0; | ||
124 | } | ||
125 | |||
126 | static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
127 | { | ||
128 | unsigned long old, t; | ||
129 | unsigned long mask = 1UL << (nr & 0x3f); | ||
130 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
131 | |||
132 | __asm__ __volatile__( | ||
133 | EIEIO_ON_SMP | ||
134 | "1: ldarx %0,0,%3 # test_and_clear_bit\n\ | ||
135 | andc %1,%0,%2\n\ | ||
136 | stdcx. %1,0,%3\n\ | ||
137 | bne- 1b" | ||
138 | ISYNC_ON_SMP | ||
139 | : "=&r" (old), "=&r" (t) | ||
140 | : "r" (mask), "r" (p) | ||
141 | : "cc", "memory"); | ||
142 | |||
143 | return (old & mask) != 0; | ||
144 | } | ||
145 | |||
146 | static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
147 | { | ||
148 | unsigned long old, t; | ||
149 | unsigned long mask = 1UL << (nr & 0x3f); | ||
150 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
151 | |||
152 | __asm__ __volatile__( | ||
153 | EIEIO_ON_SMP | ||
154 | "1: ldarx %0,0,%3 # test_and_change_bit\n\ | ||
155 | xor %1,%0,%2\n\ | ||
156 | stdcx. %1,0,%3\n\ | ||
157 | bne- 1b" | ||
158 | ISYNC_ON_SMP | ||
159 | : "=&r" (old), "=&r" (t) | ||
160 | : "r" (mask), "r" (p) | ||
161 | : "cc", "memory"); | ||
162 | |||
163 | return (old & mask) != 0; | ||
164 | } | ||
165 | |||
166 | static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | ||
167 | { | ||
168 | unsigned long old; | ||
169 | |||
170 | __asm__ __volatile__( | ||
171 | "1: ldarx %0,0,%3 # set_bit\n\ | ||
172 | or %0,%0,%2\n\ | ||
173 | stdcx. %0,0,%3\n\ | ||
174 | bne- 1b" | ||
175 | : "=&r" (old), "=m" (*addr) | ||
176 | : "r" (mask), "r" (addr), "m" (*addr) | ||
177 | : "cc"); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * non-atomic versions | ||
182 | */ | ||
183 | static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr) | ||
184 | { | ||
185 | unsigned long mask = 1UL << (nr & 0x3f); | ||
186 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
187 | |||
188 | *p |= mask; | ||
189 | } | ||
190 | |||
191 | static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
192 | { | ||
193 | unsigned long mask = 1UL << (nr & 0x3f); | ||
194 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
195 | |||
196 | *p &= ~mask; | ||
197 | } | ||
198 | |||
199 | static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr) | ||
200 | { | ||
201 | unsigned long mask = 1UL << (nr & 0x3f); | ||
202 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
203 | |||
204 | *p ^= mask; | ||
205 | } | ||
206 | |||
207 | static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | ||
208 | { | ||
209 | unsigned long mask = 1UL << (nr & 0x3f); | ||
210 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
211 | unsigned long old = *p; | ||
212 | |||
213 | *p = old | mask; | ||
214 | return (old & mask) != 0; | ||
215 | } | ||
216 | |||
217 | static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
218 | { | ||
219 | unsigned long mask = 1UL << (nr & 0x3f); | ||
220 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
221 | unsigned long old = *p; | ||
222 | |||
223 | *p = old & ~mask; | ||
224 | return (old & mask) != 0; | ||
225 | } | ||
226 | |||
227 | static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
228 | { | ||
229 | unsigned long mask = 1UL << (nr & 0x3f); | ||
230 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
231 | unsigned long old = *p; | ||
232 | |||
233 | *p = old ^ mask; | ||
234 | return (old & mask) != 0; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the | ||
239 | * most significant (left-most) 1-bit in a double word. | ||
240 | */ | ||
241 | static __inline__ int __ilog2(unsigned long x) | ||
242 | { | ||
243 | int lz; | ||
244 | |||
245 | asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x)); | ||
246 | return 63 - lz; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Determines the bit position of the least significant (rightmost) 0 bit | ||
251 | * in the specified double word. The returned bit position will be zero-based, | ||
252 | * starting from the right side (63 - 0). | ||
253 | */ | ||
254 | static __inline__ unsigned long ffz(unsigned long x) | ||
255 | { | ||
256 | /* no zero exists anywhere in the 8 byte area. */ | ||
257 | if ((x = ~x) == 0) | ||
258 | return 64; | ||
259 | |||
260 | /* | ||
261 | * Calculate the bit position of the least significant '1' bit in x | ||
262 | * (since x has been changed this will actually be the least significant | ||
263 | * '0' bit in the original x). Note: (x & -x) gives us a mask that | ||
264 | * is the least significant (RIGHT-most) 1-bit of the value in x. | ||
265 | */ | ||
266 | return __ilog2(x & -x); | ||
267 | } | ||
268 | |||
269 | static __inline__ int __ffs(unsigned long x) | ||
270 | { | ||
271 | return __ilog2(x & -x); | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * ffs: find first bit set. This is defined the same way as | ||
276 | * the libc and compiler builtin ffs routines, therefore | ||
277 | * differs in spirit from the above ffz (man ffs). | ||
278 | */ | ||
279 | static __inline__ int ffs(int x) | ||
280 | { | ||
281 | unsigned long i = (unsigned long)x; | ||
282 | return __ilog2(i & -i) + 1; | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * fls: find last (most-significant) bit set. | ||
287 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
288 | */ | ||
289 | #define fls(x) generic_fls(x) | ||
290 | |||
291 | /* | ||
292 | * hweightN: returns the hamming weight (i.e. the number | ||
293 | * of bits set) of a N-bit word | ||
294 | */ | ||
295 | #define hweight64(x) generic_hweight64(x) | ||
296 | #define hweight32(x) generic_hweight32(x) | ||
297 | #define hweight16(x) generic_hweight16(x) | ||
298 | #define hweight8(x) generic_hweight8(x) | ||
299 | |||
300 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
301 | #define find_first_zero_bit(addr, size) \ | ||
302 | find_next_zero_bit((addr), (size), 0) | ||
303 | |||
304 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
305 | #define find_first_bit(addr, size) \ | ||
306 | find_next_bit((addr), (size), 0) | ||
307 | |||
308 | extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
309 | #define find_first_zero_le_bit(addr, size) \ | ||
310 | find_next_zero_le_bit((addr), (size), 0) | ||
311 | |||
312 | static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr) | ||
313 | { | ||
314 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
315 | return (ADDR[nr >> 3] >> (nr & 7)) & 1; | ||
316 | } | ||
317 | |||
318 | #define test_and_clear_le_bit(nr, addr) \ | ||
319 | test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
320 | #define test_and_set_le_bit(nr, addr) \ | ||
321 | test_and_set_bit((nr) ^ 0x38, (addr)) | ||
322 | |||
323 | /* | ||
324 | * non-atomic versions | ||
325 | */ | ||
326 | |||
327 | #define __set_le_bit(nr, addr) \ | ||
328 | __set_bit((nr) ^ 0x38, (addr)) | ||
329 | #define __clear_le_bit(nr, addr) \ | ||
330 | __clear_bit((nr) ^ 0x38, (addr)) | ||
331 | #define __test_and_clear_le_bit(nr, addr) \ | ||
332 | __test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
333 | #define __test_and_set_le_bit(nr, addr) \ | ||
334 | __test_and_set_bit((nr) ^ 0x38, (addr)) | ||
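
[Editor's note: the same trick as the 32-bit XOR 0x18 case, one level up. With 64-bit words the byte index occupies bits 3-5 of the bit number, so XOR 0x38 reverses it. A quick demonstration:]

    #include <stdio.h>

    int main(void)
    {
            int nr;

            /* byte-oriented LE bit number -> native 64-bit BE bit number */
            for (nr = 0; nr < 64; nr += 8)
                    printf("LE bit %2d -> native bit %2d\n", nr, nr ^ 0x38);
            return 0;
    }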
335 | |||
336 | #define ext2_set_bit(nr,addr) \ | ||
337 | __test_and_set_le_bit((nr), (unsigned long*)addr) | ||
338 | #define ext2_clear_bit(nr, addr) \ | ||
339 | __test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
340 | |||
341 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
342 | test_and_set_le_bit((nr), (unsigned long*)addr) | ||
343 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
344 | test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
345 | |||
346 | |||
347 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
348 | #define ext2_find_first_zero_bit(addr, size) \ | ||
349 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
350 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
351 | find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
352 | |||
353 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
354 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
355 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
356 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
357 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
358 | |||
359 | #endif /* __KERNEL__ */ | ||
360 | #endif /* _PPC64_BITOPS_H */ | ||
diff --git a/include/asm-ppc64/ipcbuf.h b/include/asm-ppc64/ipcbuf.h deleted file mode 100644 index fa393c8342af..000000000000 --- a/include/asm-ppc64/ipcbuf.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #ifndef __PPC64_IPCBUF_H__ | ||
2 | #define __PPC64_IPCBUF_H__ | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for the PPC is identical to kern_ipc_perm | ||
6 | * as we have always had 32-bit UIDs and GIDs in the kernel. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | struct ipc64_perm | ||
15 | { | ||
16 | __kernel_key_t key; | ||
17 | __kernel_uid_t uid; | ||
18 | __kernel_gid_t gid; | ||
19 | __kernel_uid_t cuid; | ||
20 | __kernel_gid_t cgid; | ||
21 | __kernel_mode_t mode; | ||
22 | unsigned int seq; | ||
23 | unsigned int __pad1; | ||
24 | unsigned long __unused1; | ||
25 | unsigned long __unused2; | ||
26 | }; | ||
27 | |||
28 | #endif /* __PPC64_IPCBUF_H__ */ | ||
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h index 77a743402db4..820dd729b895 100644 --- a/include/asm-ppc64/mmu_context.h +++ b/include/asm-ppc64/mmu_context.h | |||
@@ -16,21 +16,6 @@ | |||
16 | * 2 of the License, or (at your option) any later version. | 16 | * 2 of the License, or (at your option) any later version. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | ||
20 | * Every architecture must define this function. It's the fastest | ||
21 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
22 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
23 | * bits is set. | ||
24 | */ | ||
25 | static inline int sched_find_first_bit(unsigned long *b) | ||
26 | { | ||
27 | if (unlikely(b[0])) | ||
28 | return __ffs(b[0]); | ||
29 | if (unlikely(b[1])) | ||
30 | return __ffs(b[1]) + 64; | ||
31 | return __ffs(b[2]) + 128; | ||
32 | } | ||
33 | |||
34 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 19 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
35 | { | 20 | { |
36 | } | 21 | } |
diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h index dfaa21566c9a..def47d720d3d 100644 --- a/include/asm-ppc64/nvram.h +++ b/include/asm-ppc64/nvram.h | |||
@@ -70,7 +70,7 @@ extern struct nvram_partition *nvram_find_partition(int sig, const char *name); | |||
70 | 70 | ||
71 | extern int pSeries_nvram_init(void); | 71 | extern int pSeries_nvram_init(void); |
72 | extern int pmac_nvram_init(void); | 72 | extern int pmac_nvram_init(void); |
73 | extern int bpa_nvram_init(void); | 73 | extern int mmio_nvram_init(void); |
74 | 74 | ||
75 | /* PowerMac specific nvram stuffs */ | 75 | /* PowerMac specific nvram stuffs */ |
76 | 76 | ||
diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h index c5e9052e7967..0f42fcc1900b 100644 --- a/include/asm-ppc64/smp.h +++ b/include/asm-ppc64/smp.h | |||
@@ -64,6 +64,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS]; | |||
64 | 64 | ||
65 | void smp_init_iSeries(void); | 65 | void smp_init_iSeries(void); |
66 | void smp_init_pSeries(void); | 66 | void smp_init_pSeries(void); |
67 | void smp_init_cell(void); | ||
67 | 68 | ||
68 | extern int __cpu_disable(void); | 69 | extern int __cpu_disable(void); |
69 | extern void __cpu_die(unsigned int cpu); | 70 | extern void __cpu_die(unsigned int cpu); |
diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h deleted file mode 100644 index 132c1276547b..000000000000 --- a/include/asm-ppc64/uaccess.h +++ /dev/null | |||
@@ -1,341 +0,0 @@ | |||
1 | #ifndef _PPC64_UACCESS_H | ||
2 | #define _PPC64_UACCESS_H | ||
3 | |||
4 | /* | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASSEMBLY__ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <asm/processor.h> | ||
15 | |||
16 | #define VERIFY_READ 0 | ||
17 | #define VERIFY_WRITE 1 | ||
18 | |||
19 | /* | ||
20 | * The fs value determines whether argument validity checking should be | ||
21 | * performed or not. If get_fs() == USER_DS, checking is performed; with | ||
22 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
23 | * | ||
24 | * For historical reasons, these macros are grossly misnamed. | ||
25 | */ | ||
26 | |||
27 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | ||
28 | |||
29 | #define KERNEL_DS MAKE_MM_SEG(0UL) | ||
30 | #define USER_DS MAKE_MM_SEG(0xf000000000000000UL) | ||
31 | |||
32 | #define get_ds() (KERNEL_DS) | ||
33 | #define get_fs() (current->thread.fs) | ||
34 | #define set_fs(val) (current->thread.fs = (val)) | ||
35 | |||
36 | #define segment_eq(a,b) ((a).seg == (b).seg) | ||
37 | |||
38 | /* | ||
39 | * Use the alpha trick for checking ranges: | ||
40 | * | ||
41 | * Is an address valid? This does a straightforward calculation rather | ||
42 | * than tests. | ||
43 | * | ||
44 | * Address valid if: | ||
45 | * - "addr" doesn't have any high-bits set | ||
46 | * - AND "size" doesn't have any high-bits set | ||
47 | * - OR we are in kernel mode. | ||
48 | * | ||
49 | * We don't have to check for high bits in (addr+size) because the first | ||
50 | * two checks force the maximum result to be below the start of the | ||
51 | * kernel region. | ||
52 | */ | ||
53 | #define __access_ok(addr,size,segment) \ | ||
54 | (((segment).seg & (addr | size )) == 0) | ||
55 | |||
56 | #define access_ok(type,addr,size) \ | ||
57 | __access_ok(((__force unsigned long)(addr)),(size),get_fs()) | ||
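
[Editor's sketch: the ppc64 "alpha trick" above in action. With USER_DS.seg = 0xf000000000000000, any address or size with a bit in the top nibble fails the check; with KERNEL_DS.seg = 0, everything passes. Host-side illustration:]

    #include <stdio.h>

    static int sketch_access_ok(unsigned long long addr,
                                unsigned long long size,
                                unsigned long long seg)
    {
            return (seg & (addr | size)) == 0;
    }

    int main(void)
    {
            unsigned long long user_seg = 0xf000000000000000ULL;

            printf("%d\n", sketch_access_ok(0x10000000ULL, 4096, user_seg));           /* 1 */
            printf("%d\n", sketch_access_ok(0xc000000000000000ULL, 4096, user_seg));   /* 0 */
            printf("%d\n", sketch_access_ok(0xc000000000000000ULL, 4096, 0));          /* 1: KERNEL_DS */
            return 0;
    }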
58 | |||
59 | /* | ||
60 | * The exception table consists of pairs of addresses: the first is the | ||
61 | * address of an instruction that is allowed to fault, and the second is | ||
62 | * the address at which the program should continue. No registers are | ||
63 | * modified, so it is entirely up to the continuation code to figure out | ||
64 | * what to do. | ||
65 | * | ||
66 | * All the routines below use bits of fixup code that are out of line | ||
67 | * with the main instruction path. This means when everything is well, | ||
68 | * we don't even have to jump over them. Further, they do not intrude | ||
69 | * on our cache or tlb entries. | ||
70 | */ | ||
71 | |||
72 | struct exception_table_entry | ||
73 | { | ||
74 | unsigned long insn, fixup; | ||
75 | }; | ||
76 | |||
77 | /* Returns 0 if exception not found and fixup otherwise. */ | ||
78 | extern unsigned long search_exception_table(unsigned long); | ||
79 | |||
80 | /* | ||
81 | * These are the main single-value transfer routines. They automatically | ||
82 | * use the right size if we just have the right pointer type. | ||
83 | * | ||
84 | * This gets kind of ugly. We want to return _two_ values in "get_user()" | ||
85 | * and yet we don't want to do any pointers, because that is too much | ||
86 | * of a performance impact. Thus we have a few rather ugly macros here, | ||
87 | * and hide all the ugliness from the user. | ||
88 | * | ||
89 | * The "__xxx" versions of the user access functions are versions that | ||
90 | * do not verify the address space, that must have been done previously | ||
91 | * with a separate "access_ok()" call (this is used when we do multiple | ||
92 | * accesses to the same area of user memory). | ||
93 | * | ||
94 | * As we use the same address space for kernel and user data on the | ||
95 | * PowerPC, we can just do these as direct assignments. (Of course, the | ||
96 | * exception handling means that it's no longer "just"...) | ||
97 | */ | ||
98 | #define get_user(x,ptr) \ | ||
99 | __get_user_check((x),(ptr),sizeof(*(ptr))) | ||
100 | #define put_user(x,ptr) \ | ||
101 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
102 | |||
103 | #define __get_user(x,ptr) \ | ||
104 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | ||
105 | #define __put_user(x,ptr) \ | ||
106 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
107 | |||
108 | #define __get_user_unaligned __get_user | ||
109 | #define __put_user_unaligned __put_user | ||
110 | |||
111 | extern long __put_user_bad(void); | ||
112 | |||
113 | #define __put_user_nocheck(x,ptr,size) \ | ||
114 | ({ \ | ||
115 | long __pu_err; \ | ||
116 | might_sleep(); \ | ||
117 | __chk_user_ptr(ptr); \ | ||
118 | __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ | ||
119 | __pu_err; \ | ||
120 | }) | ||
121 | |||
122 | #define __put_user_check(x,ptr,size) \ | ||
123 | ({ \ | ||
124 | long __pu_err = -EFAULT; \ | ||
125 | void __user *__pu_addr = (ptr); \ | ||
126 | might_sleep(); \ | ||
127 | if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ | ||
128 | __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \ | ||
129 | __pu_err; \ | ||
130 | }) | ||
131 | |||
132 | #define __put_user_size(x,ptr,size,retval,errret) \ | ||
133 | do { \ | ||
134 | retval = 0; \ | ||
135 | switch (size) { \ | ||
136 | case 1: __put_user_asm(x,ptr,retval,"stb",errret); break; \ | ||
137 | case 2: __put_user_asm(x,ptr,retval,"sth",errret); break; \ | ||
138 | case 4: __put_user_asm(x,ptr,retval,"stw",errret); break; \ | ||
139 | case 8: __put_user_asm(x,ptr,retval,"std",errret); break; \ | ||
140 | default: __put_user_bad(); \ | ||
141 | } \ | ||
142 | } while (0) | ||
143 | |||
144 | /* | ||
145 | * We don't tell gcc that we are accessing memory, but this is OK | ||
146 | * because we do not write to any memory gcc knows about, so there | ||
147 | * are no aliasing issues. | ||
148 | */ | ||
149 | #define __put_user_asm(x, addr, err, op, errret) \ | ||
150 | __asm__ __volatile__( \ | ||
151 | "1: "op" %1,0(%2) # put_user\n" \ | ||
152 | "2:\n" \ | ||
153 | ".section .fixup,\"ax\"\n" \ | ||
154 | "3: li %0,%3\n" \ | ||
155 | " b 2b\n" \ | ||
156 | ".previous\n" \ | ||
157 | ".section __ex_table,\"a\"\n" \ | ||
158 | " .align 3\n" \ | ||
159 | " .llong 1b,3b\n" \ | ||
160 | ".previous" \ | ||
161 | : "=r"(err) \ | ||
162 | : "r"(x), "b"(addr), "i"(errret), "0"(err)) | ||
163 | |||
164 | |||
165 | #define __get_user_nocheck(x,ptr,size) \ | ||
166 | ({ \ | ||
167 | long __gu_err; \ | ||
168 | unsigned long __gu_val; \ | ||
169 | might_sleep(); \ | ||
170 | __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ | ||
171 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
172 | __gu_err; \ | ||
173 | }) | ||
174 | |||
175 | #define __get_user_check(x,ptr,size) \ | ||
176 | ({ \ | ||
177 | long __gu_err = -EFAULT; \ | ||
178 | unsigned long __gu_val = 0; \ | ||
179 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
180 | might_sleep(); \ | ||
181 | if (access_ok(VERIFY_READ,__gu_addr,size)) \ | ||
182 | __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\ | ||
183 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
184 | __gu_err; \ | ||
185 | }) | ||
186 | |||
187 | extern long __get_user_bad(void); | ||
188 | |||
189 | #define __get_user_size(x,ptr,size,retval,errret) \ | ||
190 | do { \ | ||
191 | retval = 0; \ | ||
192 | __chk_user_ptr(ptr); \ | ||
193 | switch (size) { \ | ||
194 | case 1: __get_user_asm(x,ptr,retval,"lbz",errret); break; \ | ||
195 | case 2: __get_user_asm(x,ptr,retval,"lhz",errret); break; \ | ||
196 | case 4: __get_user_asm(x,ptr,retval,"lwz",errret); break; \ | ||
197 | case 8: __get_user_asm(x,ptr,retval,"ld",errret); break; \ | ||
198 | default: (x) = __get_user_bad(); \ | ||
199 | } \ | ||
200 | } while (0) | ||
201 | |||
202 | #define __get_user_asm(x, addr, err, op, errret) \ | ||
203 | __asm__ __volatile__( \ | ||
204 | "1: "op" %1,0(%2) # get_user\n" \ | ||
205 | "2:\n" \ | ||
206 | ".section .fixup,\"ax\"\n" \ | ||
207 | "3: li %0,%3\n" \ | ||
208 | " li %1,0\n" \ | ||
209 | " b 2b\n" \ | ||
210 | ".previous\n" \ | ||
211 | ".section __ex_table,\"a\"\n" \ | ||
212 | " .align 3\n" \ | ||
213 | " .llong 1b,3b\n" \ | ||
214 | ".previous" \ | ||
215 | : "=r"(err), "=r"(x) \ | ||
216 | : "b"(addr), "i"(errret), "0"(err)) | ||
217 | |||
218 | /* more complex routines */ | ||
219 | |||
220 | extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from, | ||
221 | unsigned long size); | ||
222 | |||
223 | static inline unsigned long | ||
224 | __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) | ||
225 | { | ||
226 | if (__builtin_constant_p(n)) { | ||
227 | unsigned long ret; | ||
228 | |||
229 | switch (n) { | ||
230 | case 1: | ||
231 | __get_user_size(*(u8 *)to, from, 1, ret, 1); | ||
232 | return ret; | ||
233 | case 2: | ||
234 | __get_user_size(*(u16 *)to, from, 2, ret, 2); | ||
235 | return ret; | ||
236 | case 4: | ||
237 | __get_user_size(*(u32 *)to, from, 4, ret, 4); | ||
238 | return ret; | ||
239 | case 8: | ||
240 | __get_user_size(*(u64 *)to, from, 8, ret, 8); | ||
241 | return ret; | ||
242 | } | ||
243 | } | ||
244 | return __copy_tofrom_user((__force void __user *) to, from, n); | ||
245 | } | ||
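
[Editor's sketch: for a compile-time-constant n in {1, 2, 4, 8} the routine above collapses into a single __get_user_size access instead of a call to __copy_tofrom_user. The host-side stand-in below mirrors only the size dispatch and the kernel convention of returning bytes left uncopied; memcpy replaces the uaccess primitives.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned long sketch_copy_from(void *to, const void *from,
                                          unsigned long n)
    {
            switch (n) {
            case 1: *(uint8_t *)to  = *(const uint8_t *)from;  return 0;
            case 2: *(uint16_t *)to = *(const uint16_t *)from; return 0;
            case 4: *(uint32_t *)to = *(const uint32_t *)from; return 0;
            case 8: *(uint64_t *)to = *(const uint64_t *)from; return 0;
            }
            memcpy(to, from, n);    /* generic path */
            return 0;               /* 0 bytes left uncopied */
    }

    int main(void)
    {
            uint32_t src = 0xdeadbeef, dst = 0;

            sketch_copy_from(&dst, &src, sizeof(dst)); /* constant 4: fast path */
            printf("%#x\n", dst);
            return 0;
    }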
246 | |||
247 | static inline unsigned long | ||
248 | __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
249 | { | ||
250 | might_sleep(); | ||
251 | return __copy_from_user_inatomic(to, from, n); | ||
252 | } | ||
253 | |||
254 | static inline unsigned long | ||
255 | __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) | ||
256 | { | ||
257 | if (__builtin_constant_p(n)) { | ||
258 | unsigned long ret; | ||
259 | |||
260 | switch (n) { | ||
261 | case 1: | ||
262 | __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); | ||
263 | return ret; | ||
264 | case 2: | ||
265 | __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); | ||
266 | return ret; | ||
267 | case 4: | ||
268 | __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); | ||
269 | return ret; | ||
270 | case 8: | ||
271 | __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret, 8); | ||
272 | return ret; | ||
273 | } | ||
274 | } | ||
275 | return __copy_tofrom_user(to, (__force const void __user *) from, n); | ||
276 | } | ||
277 | |||
278 | static inline unsigned long | ||
279 | __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
280 | { | ||
281 | might_sleep(); | ||
282 | return __copy_to_user_inatomic(to, from, n); | ||
283 | } | ||
284 | |||
285 | #define __copy_in_user(to, from, size) \ | ||
286 | __copy_tofrom_user((to), (from), (size)) | ||
287 | |||
288 | extern unsigned long copy_from_user(void *to, const void __user *from, | ||
289 | unsigned long n); | ||
290 | extern unsigned long copy_to_user(void __user *to, const void *from, | ||
291 | unsigned long n); | ||
292 | extern unsigned long copy_in_user(void __user *to, const void __user *from, | ||
293 | unsigned long n); | ||
294 | |||
295 | extern unsigned long __clear_user(void __user *addr, unsigned long size); | ||
296 | |||
297 | static inline unsigned long | ||
298 | clear_user(void __user *addr, unsigned long size) | ||
299 | { | ||
300 | might_sleep(); | ||
301 | if (likely(access_ok(VERIFY_WRITE, addr, size))) | ||
302 | size = __clear_user(addr, size); | ||
303 | return size; | ||
304 | } | ||
305 | |||
306 | extern int __strncpy_from_user(char *dst, const char __user *src, long count); | ||
307 | |||
308 | static inline long | ||
309 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
310 | { | ||
311 | might_sleep(); | ||
312 | if (likely(access_ok(VERIFY_READ, src, 1))) | ||
313 | return __strncpy_from_user(dst, src, count); | ||
314 | return -EFAULT; | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * Return the size of a string (including the ending 0) | ||
319 | * | ||
320 | * Return 0 for error | ||
321 | */ | ||
322 | extern int __strnlen_user(const char __user *str, long len); | ||
323 | |||
324 | /* | ||
325 | * Returns the length of the string at str (including the null byte), | ||
326 | * or 0 if we hit a page we can't access, | ||
327 | * or something > len if we didn't find a null byte. | ||
328 | */ | ||
329 | static inline int strnlen_user(const char __user *str, long len) | ||
330 | { | ||
331 | might_sleep(); | ||
332 | if (likely(access_ok(VERIFY_READ, str, 1))) | ||
333 | return __strnlen_user(str, len); | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | #define strlen_user(str) strnlen_user((str), 0x7ffffffe) | ||
338 | |||
339 | #endif /* __ASSEMBLY__ */ | ||
340 | |||
341 | #endif /* _PPC64_UACCESS_H */ | ||