Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/bitops.h      437
-rw-r--r--  include/asm-powerpc/bug.h          34
-rw-r--r--  include/asm-powerpc/futex.h        84
-rw-r--r--  include/asm-powerpc/ioctls.h        3
-rw-r--r--  include/asm-powerpc/ipcbuf.h       33
-rw-r--r--  include/asm-powerpc/irq.h           2
-rw-r--r--  include/asm-powerpc/ppc_asm.h       7
-rw-r--r--  include/asm-powerpc/processor.h     2
-rw-r--r--  include/asm-powerpc/rtas.h          3
-rw-r--r--  include/asm-powerpc/termios.h     135
-rw-r--r--  include/asm-powerpc/uaccess.h     468
11 files changed, 1053 insertions, 155 deletions
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 000000000000..dc25c53704d5
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
/*
 * PowerPC atomic bit operations.
 *
 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
 * originally took it from the ppc32 code.
 *
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so for a
 * ppc64 system the bits end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 * and on ppc32:
 *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 *
 * There are a few little-endian macros used mostly for filesystem
 * bitmaps; these work on similar bit array layouts, but
 * byte-oriented:
 *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
 *
 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
 * number field need to be reversed compared to the big-endian bit
 * fields.  This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_POWERPC_BITOPS_H
#define _ASM_POWERPC_BITOPS_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/synch.h>

/*
 * clear_bit doesn't imply a memory barrier
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)

#ifdef CONFIG_PPC64
#define LARXL	"ldarx"
#define STCXL	"stdcx."
#define CNTLZL	"cntlzd"
#else
#define LARXL	"lwarx"
#define STCXL	"stwcx."
#define CNTLZL	"cntlzw"
#endif

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
"1:"	LARXL "	%0,0,%3	# set_bit\n"
	"or	%0,%0,%2\n"
	PPC405_ERR77(0,%3)
	STCXL "	%0,0,%3\n"
	"bne-	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
"1:"	LARXL "	%0,0,%3	# clear_bit\n"
	"andc	%0,%0,%2\n"
	PPC405_ERR77(0,%3)
	STCXL "	%0,0,%3\n"
	"bne-	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
"1:"	LARXL "	%0,0,%3	# change_bit\n"
	"xor	%0,%0,%2\n"
	PPC405_ERR77(0,%3)
	STCXL "	%0,0,%3\n"
	"bne-	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

static __inline__ int test_and_set_bit(unsigned long nr,
				       volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:"	LARXL "	%0,0,%3		# test_and_set_bit\n"
	"or	%1,%0,%2\n"
	PPC405_ERR77(0,%3)
	STCXL "	%1,0,%3\n"
	"bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return (old & mask) != 0;
}

static __inline__ int test_and_clear_bit(unsigned long nr,
					 volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:"	LARXL "	%0,0,%3		# test_and_clear_bit\n"
	"andc	%1,%0,%2\n"
	PPC405_ERR77(0,%3)
	STCXL "	%1,0,%3\n"
	"bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return (old & mask) != 0;
}

static __inline__ int test_and_change_bit(unsigned long nr,
					  volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:"	LARXL "	%0,0,%3		# test_and_change_bit\n"
	"xor	%1,%0,%2\n"
	PPC405_ERR77(0,%3)
	STCXL "	%1,0,%3\n"
	"bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return (old & mask) != 0;
}

static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
{
	unsigned long old;

	__asm__ __volatile__(
"1:"	LARXL "	%0,0,%3	# set_bits\n"
	"or	%0,%0,%2\n"
	STCXL "	%0,0,%3\n"
	"bne-	1b"
	: "=&r" (old), "=m" (*addr)
	: "r" (mask), "r" (addr), "m" (*addr)
	: "cc");
}

/* Non-atomic versions */
static __inline__ int test_bit(unsigned long nr,
			       __const__ volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

static __inline__ void __set_bit(unsigned long nr,
				 volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	*p |= mask;
}

static __inline__ void __clear_bit(unsigned long nr,
				   volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	*p &= ~mask;
}

static __inline__ void __change_bit(unsigned long nr,
				    volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	*p ^= mask;
}

static __inline__ int __test_and_set_bit(unsigned long nr,
					 volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

static __inline__ int __test_and_clear_bit(unsigned long nr,
					   volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

static __inline__ int __test_and_change_bit(unsigned long nr,
					    volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/*
 * Return the zero-based bit position (LE, not IBM bit numbering) of
 * the most significant 1-bit in a double word.
 */
static __inline__ int __ilog2(unsigned long x)
{
	int lz;

	asm (CNTLZL "	%0,%1" : "=r" (lz) : "r" (x));
	return BITS_PER_LONG - 1 - lz;
}

/*
 * Determines the bit position of the least significant 0 bit in the
 * specified double word.  The returned bit position will be
 * zero-based, starting from the right side (63/31 - 0).
 */
static __inline__ unsigned long ffz(unsigned long x)
{
	/* no zero exists anywhere in the 8 byte area. */
	if ((x = ~x) == 0)
		return BITS_PER_LONG;

	/*
	 * Calculate the bit position of the least significant '1' bit in x
	 * (since x has been changed this will actually be the least
	 * significant '0' bit in the original x).  Note: (x & -x) gives us
	 * a mask that is the least significant (RIGHT-most) 1-bit of the
	 * value in x.
	 */
	return __ilog2(x & -x);
}

static __inline__ int __ffs(unsigned long x)
{
	return __ilog2(x & -x);
}

/*
 * ffs: find first bit set.  This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	unsigned long i = (unsigned long)x;
	return __ilog2(i & -i) + 1;
}

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw	%0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight64(x)	generic_hweight64(x)
#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)

#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
unsigned long find_next_zero_bit(const unsigned long *addr,
				 unsigned long size, unsigned long offset);
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
unsigned long find_next_bit(const unsigned long *addr,
			    unsigned long size, unsigned long offset);

/* Little-endian versions */

static __inline__ int test_le_bit(unsigned long nr,
				  __const__ unsigned long *addr)
{
	__const__ unsigned char *tmp = (__const__ unsigned char *)addr;
	return (tmp[nr >> 3] >> (nr & 7)) & 1;
}

#define __set_le_bit(nr, addr) \
	__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define __clear_le_bit(nr, addr) \
	__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define test_and_set_le_bit(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define test_and_clear_le_bit(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define __test_and_set_le_bit(nr, addr) \
	__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define __test_and_clear_le_bit(nr, addr) \
	__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)
unsigned long find_next_zero_le_bit(const unsigned long *addr,
				    unsigned long size, unsigned long offset);

/* Bitmap functions for the ext2 filesystem */

#define ext2_set_bit(nr, addr) \
	__test_and_set_le_bit((nr), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr), (unsigned long *)addr)

#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_le_bit((nr), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_le_bit((nr), (unsigned long *)addr)

#define ext2_test_bit(nr, addr)	test_le_bit((nr), (unsigned long *)addr)

#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long *)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long *)addr, size, off)

/* Bitmap functions for the minix filesystem. */

#define minix_test_and_set_bit(nr, addr) \
	__test_and_set_le_bit(nr, (unsigned long *)addr)
#define minix_set_bit(nr, addr) \
	__set_le_bit(nr, (unsigned long *)addr)
#define minix_test_and_clear_bit(nr, addr) \
	__test_and_clear_le_bit(nr, (unsigned long *)addr)
#define minix_test_bit(nr, addr) \
	test_le_bit(nr, (unsigned long *)addr)

#define minix_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long *)addr, size)

/*
 * Every architecture must define this function.  It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set.  It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_PPC64
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#else
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
}

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_BITOPS_H */
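
The header comment above explains the big-endian, word-oriented bit numbering and the XOR swizzle used by the *_le_bit() helpers. A minimal userspace sketch of the same arithmetic (not part of the patch; the bit number 75 is an arbitrary example):

#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)

int main(void)
{
	unsigned long map[4] = { 0 };
	unsigned long nr = 75;	/* arbitrary example bit */

	/* __set_bit() boils down to exactly this word/mask arithmetic. */
	map[BITOP_WORD(nr)] |= BITOP_MASK(nr);
	printf("bit %lu lives in word %lu, mask 0x%lx\n",
	       nr, (unsigned long)BITOP_WORD(nr), BITOP_MASK(nr));

	/* The *_le_bit() helpers only flip bits 3-5 (64b) or 3-4 (32b)
	 * of the bit number: XOR with 0x38 or 0x18 respectively. */
	printf("LE bit %lu maps to big-endian bit %lu\n",
	       nr, nr ^ BITOP_LE_SWIZZLE);
	return 0;
}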
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index e4d028e87020..d625ee55f957 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -12,20 +12,16 @@
 #ifndef __ASSEMBLY__
 
 #ifdef __powerpc64__
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
-#define DATA_TYPE long long
+#define BUG_TABLE_ENTRY	".llong"
+#define BUG_TRAP_OP	"tdnei"
 #else
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".long " #label ", " #line ", " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
-#define DATA_TYPE int
+#define BUG_TABLE_ENTRY	".long"
+#define BUG_TRAP_OP	"twnei"
 #endif /* __powerpc64__ */
 
 struct bug_entry {
 	unsigned long bug_addr;
-	int line;
+	long line;
 	const char *file;
 	const char *function;
 };
@@ -43,29 +39,29 @@ struct bug_entry *find_bug(unsigned long bugaddr);
 #define BUG() do {							\
 	__asm__ __volatile__(						\
 		"1:	twi 31,0,0\n"					\
-		".section __bug_table,\"a\"\n\t"			\
-		BUG_TABLE_ENTRY(1b,%0,%1,%2)				\
+		".section __bug_table,\"a\"\n"				\
+		"\t"BUG_TABLE_ENTRY"	1b,%0,%1,%2\n"			\
 		".previous"						\
 		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define BUG_ON(x) do {							\
 	__asm__ __volatile__(						\
-		TRAP_OP(%0,0)						\
-		".section __bug_table,\"a\"\n\t"			\
-		BUG_TABLE_ENTRY(1b,%1,%2,%3)				\
+		"1:	"BUG_TRAP_OP"	%0,0\n"				\
+		".section __bug_table,\"a\"\n"				\
+		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n"			\
 		".previous"						\
-		: : "r" ((DATA_TYPE)(x)), "i" (__LINE__),		\
+		: : "r" ((long)(x)), "i" (__LINE__),			\
 		    "i" (__FILE__), "i" (__FUNCTION__));		\
 } while (0)
 
 #define WARN_ON(x) do {							\
 	__asm__ __volatile__(						\
-		TRAP_OP(%0,0)						\
-		".section __bug_table,\"a\"\n\t"			\
-		BUG_TABLE_ENTRY(1b,%1,%2,%3)				\
+		"1:	"BUG_TRAP_OP"	%0,0\n"				\
+		".section __bug_table,\"a\"\n"				\
+		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n"			\
 		".previous"						\
-		: : "r" ((DATA_TYPE)(x)),				\
+		: : "r" ((long)(x)),					\
 		    "i" (__LINE__ + BUG_WARNING_TRAP),			\
 		    "i" (__FILE__), "i" (__FUNCTION__));		\
 } while (0)
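
The macros above emit a trap instruction plus a bug_entry record into the __bug_table section; the trap handler can then use find_bug() to translate the faulting address back into file/line data. A userspace sketch of that lookup (the table contents and address here are hypothetical, for illustration only):

#include <stdio.h>
#include <stddef.h>

struct bug_entry {
	unsigned long bug_addr;	/* address of the twi/tdnei trap */
	long line;		/* __LINE__, + BUG_WARNING_TRAP for WARN_ON */
	const char *file;	/* __FILE__ */
	const char *function;	/* __FUNCTION__ */
};

/* Stand-in for the entries the assembler collects in __bug_table. */
static struct bug_entry bug_table[] = {
	{ 0xc0001234UL, 42, "demo.c", "demo_func" },
};

static struct bug_entry *find_bug(unsigned long bugaddr)
{
	size_t i;

	for (i = 0; i < sizeof(bug_table) / sizeof(bug_table[0]); i++)
		if (bug_table[i].bug_addr == bugaddr)
			return &bug_table[i];
	return NULL;
}

int main(void)
{
	struct bug_entry *bug = find_bug(0xc0001234UL);

	if (bug)
		printf("kernel BUG at %s:%ld in %s()!\n",
		       bug->file, bug->line, bug->function);
	return 0;
}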
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
new file mode 100644
index 000000000000..37c94e52ab6d
--- /dev/null
+++ b/include/asm-powerpc/futex.h
@@ -0,0 +1,84 @@
#ifndef _ASM_POWERPC_FUTEX_H
#define _ASM_POWERPC_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <asm/errno.h>
#include <asm/synch.h>
#include <asm/uaccess.h>
#include <asm/ppc_asm.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
	__asm__ __volatile (					\
	SYNC_ON_SMP						\
"1:	lwarx	%0,0,%2\n"					\
	insn							\
"2:	stwcx.	%1,0,%2\n"					\
	"bne-	1b\n"						\
	"li	%1,0\n"						\
"3:	.section .fixup,\"ax\"\n"				\
"4:	li	%1,%3\n"					\
	"b	3b\n"						\
	".previous\n"						\
	".section __ex_table,\"a\"\n"				\
	".align 3\n"						\
	DATAL " 1b,4b,2b,4b\n"					\
	".previous"						\
	: "=&r" (oldval), "=&r" (ret)				\
	: "b" (uaddr), "i" (-EFAULT), "1" (oparg)		\
	: "cr0", "memory")

static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	inc_preempt_count();

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	dec_preempt_count();

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_FUTEX_H */
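
futex_atomic_op_inuser() above unpacks four bit-fields from encoded_op before running the lwarx/stwcx. loop. The shifts below mirror that decoding in plain C (the sample value is hypothetical; the signed-shift trick sign-extends the two 12-bit argument fields):

#include <stdio.h>

int main(void)
{
	int encoded_op = 0x15000002;		/* hypothetical operand */

	int op = (encoded_op >> 28) & 7;	/* 1 == FUTEX_OP_ADD */
	int cmp = (encoded_op >> 24) & 15;	/* 5 == FUTEX_OP_CMP_GT */
	int oparg = (encoded_op << 8) >> 20;	/* 12-bit field, sign-extended */
	int cmparg = (encoded_op << 20) >> 20;	/* 12-bit field, sign-extended */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}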
diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h
index 5b94ff489b8b..279a6229584b 100644
--- a/include/asm-powerpc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
@@ -62,6 +62,9 @@
 # define TIOCM_DSR	0x100
 # define TIOCM_CD	TIOCM_CAR
 # define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
 
 #define TIOCGSOFTCAR	0x5419
 #define TIOCSSOFTCAR	0x541A
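
The three modem-control bits added here (TIOCM_OUT1, TIOCM_OUT2, TIOCM_LOOP) are read and written through the standard TIOCMGET/TIOCMSET ioctls. A small userspace sketch (the device path is hypothetical):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <termios.h>

#ifndef TIOCM_LOOP		/* not exposed by every libc header set */
#define TIOCM_LOOP	0x8000
#endif

int main(void)
{
	int bits;
	int fd = open("/dev/ttyS0", O_RDONLY | O_NONBLOCK); /* hypothetical port */

	if (fd < 0 || ioctl(fd, TIOCMGET, &bits) < 0) {
		perror("TIOCMGET");
		return 1;
	}
	printf("CTS=%d DSR=%d LOOP=%d\n",
	       !!(bits & TIOCM_CTS), !!(bits & TIOCM_DSR),
	       !!(bits & TIOCM_LOOP));
	close(fd);
	return 0;
}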
diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h
new file mode 100644
index 000000000000..71382c1ec6e3
--- /dev/null
+++ b/include/asm-powerpc/ipcbuf.h
@@ -0,0 +1,33 @@
#ifndef _ASM_POWERPC_IPCBUF_H
#define _ASM_POWERPC_IPCBUF_H

/*
 * The ipc64_perm structure for the powerpc is identical to
 * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
 * kernel.  Note extra padding because this structure is passed back
 * and forth between kernel and user space.  Pad space is left for:
 *	- 1 32-bit value to fill up for 8-byte alignment
 *	- 2 miscellaneous 64-bit values
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>

struct ipc64_perm
{
	__kernel_key_t	key;
	__kernel_uid_t	uid;
	__kernel_gid_t	gid;
	__kernel_uid_t	cuid;
	__kernel_gid_t	cgid;
	__kernel_mode_t	mode;
	unsigned int	seq;
	unsigned int	__pad1;
	__u32		__unused[4];
};

#endif /* _ASM_POWERPC_IPCBUF_H */
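
A quick layout check of the structure above (userspace sketch; the typedefs stand in for the __kernel_* types, which the header comment says are all 32-bit on powerpc):

#include <assert.h>
#include <stdio.h>

typedef int		kernel_key_t;
typedef unsigned int	kernel_uid_t, kernel_gid_t, kernel_mode_t;

struct ipc64_perm {
	kernel_key_t	key;
	kernel_uid_t	uid;
	kernel_gid_t	gid;
	kernel_uid_t	cuid;
	kernel_gid_t	cgid;
	kernel_mode_t	mode;
	unsigned int	seq;
	unsigned int	__pad1;		/* the 32-bit alignment filler */
	unsigned int	__unused[4];	/* the two spare 64-bit values */
};

int main(void)
{
	/* twelve 32-bit words = 48 bytes, a multiple of 8 as required. */
	printf("sizeof(struct ipc64_perm) = %zu\n", sizeof(struct ipc64_perm));
	assert(sizeof(struct ipc64_perm) % 8 == 0);
	return 0;
}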
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index c7c3f912a3c2..b3935ea28fff 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -73,7 +73,7 @@ extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
 #define IC_INVALID	0
 #define IC_OPEN_PIC	1
 #define IC_PPC_XIC	2
-#define IC_BPA_IIC	3
+#define IC_CELL_PIC	3
 #define IC_ISERIES	4
 
 extern u64 ppc64_interrupt_controller;
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index f99f2af82ca5..c534ca41224b 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -506,6 +506,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #else
 	#define __ASM_CONST(x)	x##UL
 	#define ASM_CONST(x)	__ASM_CONST(x)
+
+#ifdef CONFIG_PPC64
+#define DATAL	".llong"
+#else
+#define DATAL	".long"
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index eee954a001fd..1dc4bf7b52b3 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -70,7 +70,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
 #define PLATFORM_LPAR		0x0001
 #define PLATFORM_POWERMAC	0x0400
 #define PLATFORM_MAPLE		0x0500
-#define PLATFORM_BPA		0x1000
+#define PLATFORM_CELL		0x1000
 
 /* Compatibility with drivers coming from PPC32 world */
 #define _machine (systemcfg->platform)
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 2c050332471d..d9fd7866927f 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -171,6 +171,9 @@ struct flash_block_list_header { /* just the header of flash_block_list */
 	struct flash_block_list *next;
 };
 extern struct flash_block_list_header rtas_firmware_flash_list;
+void rtas_fw_restart(char *cmd);
+void rtas_fw_power_off(void);
+void rtas_fw_halt(void);
 
 extern struct rtas_t rtas;
 
diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h
index c5b8e5358f83..7f80a019b6a0 100644
--- a/include/asm-powerpc/termios.h
+++ b/include/asm-powerpc/termios.h
@@ -94,142 +94,9 @@ struct termio {
 #define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
 #endif
 
-#define FIOCLEX		_IO('f', 1)
-#define FIONCLEX	_IO('f', 2)
-#define FIOASYNC	_IOW('f', 125, int)
-#define FIONBIO		_IOW('f', 126, int)
-#define FIONREAD	_IOR('f', 127, int)
-#define TIOCINQ		FIONREAD
-
-#define TIOCGETP	_IOR('t', 8, struct sgttyb)
-#define TIOCSETP	_IOW('t', 9, struct sgttyb)
-#define TIOCSETN	_IOW('t', 10, struct sgttyb)	/* TIOCSETP wo flush */
-
-#define TIOCSETC	_IOW('t', 17, struct tchars)
-#define TIOCGETC	_IOR('t', 18, struct tchars)
-#define TCGETS		_IOR('t', 19, struct termios)
-#define TCSETS		_IOW('t', 20, struct termios)
-#define TCSETSW		_IOW('t', 21, struct termios)
-#define TCSETSF		_IOW('t', 22, struct termios)
-
-#define TCGETA		_IOR('t', 23, struct termio)
-#define TCSETA		_IOW('t', 24, struct termio)
-#define TCSETAW		_IOW('t', 25, struct termio)
-#define TCSETAF		_IOW('t', 28, struct termio)
-
-#define TCSBRK		_IO('t', 29)
-#define TCXONC		_IO('t', 30)
-#define TCFLSH		_IO('t', 31)
-
-#define TIOCSWINSZ	_IOW('t', 103, struct winsize)
-#define TIOCGWINSZ	_IOR('t', 104, struct winsize)
-#define TIOCSTART	_IO('t', 110)	/* start output, like ^Q */
-#define TIOCSTOP	_IO('t', 111)	/* stop output, like ^S */
-#define TIOCOUTQ	_IOR('t', 115, int)	/* output queue size */
-
-#define TIOCGLTC	_IOR('t', 116, struct ltchars)
-#define TIOCSLTC	_IOW('t', 117, struct ltchars)
-#define TIOCSPGRP	_IOW('t', 118, int)
-#define TIOCGPGRP	_IOR('t', 119, int)
-
-#define TIOCEXCL	0x540C
-#define TIOCNXCL	0x540D
-#define TIOCSCTTY	0x540E
-
-#define TIOCSTI		0x5412
-#define TIOCMGET	0x5415
-#define TIOCMBIS	0x5416
-#define TIOCMBIC	0x5417
-#define TIOCMSET	0x5418
-#define TIOCGSOFTCAR	0x5419
-#define TIOCSSOFTCAR	0x541A
-#define TIOCLINUX	0x541C
-#define TIOCCONS	0x541D
-#define TIOCGSERIAL	0x541E
-#define TIOCSSERIAL	0x541F
-#define TIOCPKT		0x5420
-
-#define TIOCNOTTY	0x5422
-#define TIOCSETD	0x5423
-#define TIOCGETD	0x5424
-#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
-
-#define TIOCSERCONFIG	0x5453
-#define TIOCSERGWILD	0x5454
-#define TIOCSERSWILD	0x5455
-#define TIOCGLCKTRMIOS	0x5456
-#define TIOCSLCKTRMIOS	0x5457
-#define TIOCSERGSTRUCT	0x5458	/* For debugging only */
-#define TIOCSERGETLSR	0x5459	/* Get line status register */
-#define TIOCSERGETMULTI	0x545A	/* Get multiport config */
-#define TIOCSERSETMULTI	0x545B	/* Set multiport config */
-
-#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
-#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
-
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-
-/* modem lines */
-#define TIOCM_LE	0x001
-#define TIOCM_DTR	0x002
-#define TIOCM_RTS	0x004
-#define TIOCM_ST	0x008
-#define TIOCM_SR	0x010
-#define TIOCM_CTS	0x020
-#define TIOCM_CAR	0x040
-#define TIOCM_RNG	0x080
-#define TIOCM_DSR	0x100
-#define TIOCM_CD	TIOCM_CAR
-#define TIOCM_RI	TIOCM_RNG
-#define TIOCM_OUT1	0x2000
-#define TIOCM_OUT2	0x4000
-#define TIOCM_LOOP	0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
-
 #ifdef __KERNEL__
 
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
-	unsigned short __tmp; \
-	get_user(__tmp,&(termio)->x); \
-	(termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
-}
-
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
-	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
-	put_user((termios)->c_iflag, &(termio)->c_iflag); \
-	put_user((termios)->c_oflag, &(termio)->c_oflag); \
-	put_user((termios)->c_cflag, &(termio)->c_cflag); \
-	put_user((termios)->c_lflag, &(termio)->c_lflag); \
-	put_user((termios)->c_line, &(termio)->c_line); \
-	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#include <asm-generic/termios.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 000000000000..33af730f0d19
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access of 64-bit data.  The "get_user" functions do not
 * properly handle 64-bit data because the value gets downcast to a long.
 * The "put_user" functions already handle 64-bit data properly, but we
 * add "user64" versions for completeness.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif

#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user

extern long __put_user_bad(void);

#ifdef __powerpc64__
#define __EX_TABLE_ALIGN	"3"
#define __EX_TABLE_TYPE		"llong"
#else
#define __EX_TABLE_ALIGN	"2"
#define __EX_TABLE_TYPE		"long"
#endif

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op "	%1,0(%2)	# put_user\n"	\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align " __EX_TABLE_ALIGN "\n"		\
		"	." __EX_TABLE_TYPE " 1b,3b\n"		\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align " __EX_TABLE_ALIGN "\n"		\
		"	." __EX_TABLE_TYPE " 1b,4b\n"		\
		"	." __EX_TABLE_TYPE " 2b,4b\n"		\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	might_sleep();						\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), (ptr), (size), __pu_err);		\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	might_sleep();						\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op "	%1,0(%2)	# get_user\n"	\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	li %1,0\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align " __EX_TABLE_ALIGN "\n"		\
		"	." __EX_TABLE_TYPE " 1b,3b\n"		\
		".previous"					\
		: "=r" (err), "=r" (x)				\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)				\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	lwz %1,0(%2)\n"				\
		"2:	lwz %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	li %1,0\n"				\
		"	li %1+1,0\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align " __EX_TABLE_ALIGN "\n"		\
		"	." __EX_TABLE_TYPE " 1b,4b\n"		\
		"	." __EX_TABLE_TYPE " 2b,4b\n"		\
		".previous"					\
		: "=r" (err), "=&r" (x)				\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	might_sleep();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	__chk_user_ptr(ptr);					\
	might_sleep();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	might_sleep();						\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

extern inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

extern inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long strncpy_from_user(char *dst, const char __user *src,
		long count)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, src, 1)))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 for error
 */
extern int __strnlen_user(const char __user *str, long len, unsigned long top);

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
static inline int strnlen_user(const char __user *str, long len)
{
	unsigned long top = current->thread.fs.seg;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len, top);
}

#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ARCH_POWERPC_UACCESS_H */
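
The 32-bit __access_ok() above accepts a range only if it starts below the segment limit and does not wrap past it; the size == 0 special case keeps zero-length accesses legal at any valid address. The same predicate, testable in userspace (the segment limit here is a hypothetical TASK_SIZE - 1):

#include <stdio.h>

typedef struct { unsigned long seg; } mm_segment_t;

static int my_access_ok(unsigned long addr, unsigned long size, mm_segment_t s)
{
	return addr <= s.seg &&
	       (size == 0 || size - 1 <= s.seg - addr);
}

int main(void)
{
	mm_segment_t user_ds = { 0xbfffffffUL };	/* hypothetical TASK_SIZE - 1 */

	printf("%d\n", my_access_ok(0x10000000UL, 0x1000, user_ds));	/* 1: fits        */
	printf("%d\n", my_access_ok(0xbffff000UL, 0x2000, user_ds));	/* 0: crosses top */
	printf("%d\n", my_access_ok(0xc0000000UL, 0x10, user_ds));	/* 0: kernel addr */
	return 0;
}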