author	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-04 19:27:50 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-04 19:27:50 -0500
commit	602d4a7e2f4b843d1a67375d4d7104073495b758
tree	0b9f184e54fa693c27bd5986c114bdcf6949f788 /include
parent	0bbacc402e67abca8794a8401c1621dc0c0202e9
parent	c51e3a417bb0f295e13a5bad86302b5212eafdf3
Merge master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/bitops.h  437
-rw-r--r--  include/asm-powerpc/bug.h  34
-rw-r--r--  include/asm-powerpc/elf.h  22
-rw-r--r--  include/asm-powerpc/futex.h (renamed from include/asm-ppc64/futex.h)  45
-rw-r--r--  include/asm-powerpc/ioctls.h  3
-rw-r--r--  include/asm-powerpc/ipcbuf.h  34
-rw-r--r--  include/asm-powerpc/irq.h  2
-rw-r--r--  include/asm-powerpc/iseries/hv_call.h (renamed from include/asm-ppc64/iSeries/HvCall.h)  10
-rw-r--r--  include/asm-powerpc/iseries/hv_call_event.h (renamed from include/asm-ppc64/iSeries/HvCallEvent.h)  10
-rw-r--r--  include/asm-powerpc/iseries/hv_call_sc.h (renamed from include/asm-ppc64/iSeries/HvCallSc.h)  6
-rw-r--r--  include/asm-powerpc/iseries/hv_call_xm.h (renamed from include/asm-ppc64/iSeries/HvCallXm.h)  10
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_config.h (renamed from include/asm-ppc64/iSeries/HvLpConfig.h)  12
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_event.h (renamed from include/asm-ppc64/iSeries/HvLpEvent.h)  10
-rw-r--r--  include/asm-powerpc/iseries/hv_types.h (renamed from include/asm-ppc64/iSeries/HvTypes.h)  6
-rw-r--r--  include/asm-powerpc/iseries/iseries_io.h (renamed from include/asm-ppc64/iSeries/iSeries_io.h)  6
-rw-r--r--  include/asm-powerpc/iseries/it_exp_vpd_panel.h (renamed from include/asm-ppc64/iSeries/ItExtVpdPanel.h)  6
-rw-r--r--  include/asm-powerpc/iseries/it_lp_naca.h (renamed from include/asm-ppc64/iSeries/ItLpNaca.h)  6
-rw-r--r--  include/asm-powerpc/iseries/it_lp_queue.h (renamed from include/asm-ppc64/iSeries/ItLpQueue.h)  6
-rw-r--r--  include/asm-powerpc/iseries/it_lp_reg_save.h (renamed from include/asm-ppc64/iSeries/ItLpRegSave.h)  4
-rw-r--r--  include/asm-powerpc/iseries/lpar_map.h (renamed from include/asm-ppc64/iSeries/LparMap.h)  6
-rw-r--r--  include/asm-powerpc/iseries/mf.h (renamed from include/asm-ppc64/iSeries/mf.h)  10
-rw-r--r--  include/asm-powerpc/iseries/vio.h (renamed from include/asm-ppc64/iSeries/vio.h)  10
-rw-r--r--  include/asm-powerpc/kexec.h  49
-rw-r--r--  include/asm-powerpc/machdep.h  1
-rw-r--r--  include/asm-powerpc/numnodes.h  7
-rw-r--r--  include/asm-powerpc/ppc_asm.h  7
-rw-r--r--  include/asm-powerpc/processor.h  2
-rw-r--r--  include/asm-powerpc/ptrace.h (renamed from include/asm-ppc64/ptrace.h)  143
-rw-r--r--  include/asm-powerpc/rtas.h  25
-rw-r--r--  include/asm-powerpc/sigcontext.h (renamed from include/asm-ppc64/sigcontext.h)  41
-rw-r--r--  include/asm-powerpc/smp.h (renamed from include/asm-ppc64/smp.h)  45
-rw-r--r--  include/asm-powerpc/sparsemem.h (renamed from include/asm-ppc64/sparsemem.h)  6
-rw-r--r--  include/asm-powerpc/stat.h  81
-rw-r--r--  include/asm-powerpc/system.h  48
-rw-r--r--  include/asm-powerpc/termios.h  135
-rw-r--r--  include/asm-powerpc/time.h  2
-rw-r--r--  include/asm-powerpc/tlb.h (renamed from include/asm-ppc/tlb.h)  59
-rw-r--r--  include/asm-powerpc/tlbflush.h  146
-rw-r--r--  include/asm-powerpc/uaccess.h  468
-rw-r--r--  include/asm-powerpc/ucontext.h  40
-rw-r--r--  include/asm-ppc/bitops.h  460
-rw-r--r--  include/asm-ppc/commproc.h  2
-rw-r--r--  include/asm-ppc/futex.h  53
-rw-r--r--  include/asm-ppc/ipcbuf.h  29
-rw-r--r--  include/asm-ppc/kexec.h  40
-rw-r--r--  include/asm-ppc/ptrace.h  152
-rw-r--r--  include/asm-ppc/sigcontext.h  15
-rw-r--r--  include/asm-ppc/stat.h  69
-rw-r--r--  include/asm-ppc/tlbflush.h  115
-rw-r--r--  include/asm-ppc/uaccess.h  393
-rw-r--r--  include/asm-ppc/ucontext.h  27
-rw-r--r--  include/asm-ppc64/bitops.h  360
-rw-r--r--  include/asm-ppc64/dart.h  59
-rw-r--r--  include/asm-ppc64/io.h  2
-rw-r--r--  include/asm-ppc64/ipcbuf.h  28
-rw-r--r--  include/asm-ppc64/kexec.h  41
-rw-r--r--  include/asm-ppc64/mmu_context.h  15
-rw-r--r--  include/asm-ppc64/naca.h  24
-rw-r--r--  include/asm-ppc64/numnodes.h  7
-rw-r--r--  include/asm-ppc64/nvram.h  2
-rw-r--r--  include/asm-ppc64/paca.h  2
-rw-r--r--  include/asm-ppc64/plpar_wrappers.h  120
-rw-r--r--  include/asm-ppc64/ppc32.h  122
-rw-r--r--  include/asm-ppc64/spinlock.h  2
-rw-r--r--  include/asm-ppc64/stat.h  60
-rw-r--r--  include/asm-ppc64/tlb.h  39
-rw-r--r--  include/asm-ppc64/tlbflush.h  52
-rw-r--r--  include/asm-ppc64/uaccess.h  341
-rw-r--r--  include/asm-ppc64/ucontext.h  22
69 files changed, 1629 insertions(+), 3024 deletions(-)
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 000000000000..dc25c53704d5
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
+/*
+ * PowerPC atomic bit operations.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first.  Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ *   |63..............0|127............64|191...........128|255...........192|
+ * and on ppc32:
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit array layouts, but
+ * byte-oriented:
+ *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_BITOPS_H
+#define _ASM_POWERPC_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+#include <asm/synch.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
+
+#ifdef CONFIG_PPC64
+#define LARXL		"ldarx"
+#define STCXL		"stdcx."
+#define CNTLZL		"cntlzd"
+#else
+#define LARXL		"lwarx"
+#define STCXL		"stwcx."
+#define CNTLZL		"cntlzw"
+#endif
+
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
+"1:"	LARXL "	%0,0,%3	# set_bit\n"
+	"or	%0,%0,%2\n"
+	PPC405_ERR77(0,%3)
+	STCXL "	%0,0,%3\n"
+	"bne-	1b"
+	: "=&r"(old), "=m"(*p)
+	: "r"(mask), "r"(p), "m"(*p)
+	: "cc" );
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
+"1:"	LARXL "	%0,0,%3	# clear_bit\n"
+	"andc	%0,%0,%2\n"
+	PPC405_ERR77(0,%3)
+	STCXL "	%0,0,%3\n"
+	"bne-	1b"
+	: "=&r"(old), "=m"(*p)
+	: "r"(mask), "r"(p), "m"(*p)
+	: "cc" );
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
+"1:"	LARXL "	%0,0,%3	# change_bit\n"
+	"xor	%0,%0,%2\n"
+	PPC405_ERR77(0,%3)
+	STCXL "	%0,0,%3\n"
+	"bne-	1b"
+	: "=&r"(old), "=m"(*p)
+	: "r"(mask), "r"(p), "m"(*p)
+	: "cc" );
+}
+
+static __inline__ int test_and_set_bit(unsigned long nr,
+				       volatile unsigned long *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:"	LARXL "	%0,0,%3		# test_and_set_bit\n"
+	"or	%1,%0,%2 \n"
+	PPC405_ERR77(0,%3)
+	STCXL "	%1,0,%3 \n"
+	"bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static __inline__ int test_and_clear_bit(unsigned long nr,
+					 volatile unsigned long *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:"	LARXL "	%0,0,%3		# test_and_clear_bit\n"
+	"andc	%1,%0,%2 \n"
+	PPC405_ERR77(0,%3)
+	STCXL "	%1,0,%3 \n"
+	"bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static __inline__ int test_and_change_bit(unsigned long nr,
+					  volatile unsigned long *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:"	LARXL "	%0,0,%3		# test_and_change_bit\n"
+	"xor	%1,%0,%2 \n"
+	PPC405_ERR77(0,%3)
+	STCXL "	%1,0,%3 \n"
+	"bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
+{
+	unsigned long old;
+
+	__asm__ __volatile__(
+"1:"	LARXL "	%0,0,%3	# set_bits\n"
+	"or	%0,%0,%2\n"
+	STCXL "	%0,0,%3\n"
+	"bne-	1b"
+	: "=&r" (old), "=m" (*addr)
+	: "r" (mask), "r" (addr), "m" (*addr)
+	: "cc");
+}
+
+/* Non-atomic versions */
+static __inline__ int test_bit(unsigned long nr,
+			       __const__ volatile unsigned long *addr)
+{
+	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+static __inline__ void __set_bit(unsigned long nr,
+				 volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	*p |= mask;
+}
+
+static __inline__ void __clear_bit(unsigned long nr,
+				   volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	*p &= ~mask;
+}
+
+static __inline__ void __change_bit(unsigned long nr,
+				    volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+	*p ^= mask;
+}
+
+static __inline__ int __test_and_set_bit(unsigned long nr,
+					 volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_clear_bit(unsigned long nr,
+					   volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_change_bit(unsigned long nr,
+					    volatile unsigned long *addr)
+{
+	unsigned long mask = BITOP_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/*
+ * Return the zero-based bit position (LE, not IBM bit numbering) of
+ * the most significant 1-bit in a double word.
+ */
+static __inline__ int __ilog2(unsigned long x)
+{
+	int lz;
+
+	asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
+	return BITS_PER_LONG - 1 - lz;
+}
+
+/*
+ * Determines the bit position of the least significant 0 bit in the
+ * specified double word. The returned bit position will be
+ * zero-based, starting from the right side (63/31 - 0).
+ */
+static __inline__ unsigned long ffz(unsigned long x)
+{
+	/* no zero exists anywhere in the 8 byte area. */
+	if ((x = ~x) == 0)
+		return BITS_PER_LONG;
+
+	/*
+	 * Calculate the bit position of the least significant '1' bit in x
+	 * (since x has been changed this will actually be the least significant
+	 * '0' bit in the original x).  Note: (x & -x) gives us a mask that
+	 * is the least significant (RIGHT-most) 1-bit of the value in x.
+	 */
+	return __ilog2(x & -x);
+}
+
+static __inline__ int __ffs(unsigned long x)
+{
+	return __ilog2(x & -x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+	unsigned long i = (unsigned long)x;
+	return __ilog2(i & -i) + 1;
+}
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __inline__ int fls(unsigned int x)
+{
+	int lz;
+
+	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+	return 32 - lz;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+unsigned long find_next_zero_bit(const unsigned long *addr,
+				 unsigned long size, unsigned long offset);
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+unsigned long find_next_bit(const unsigned long *addr,
+			    unsigned long size, unsigned long offset);
+
+/* Little-endian versions */
+
+static __inline__ int test_le_bit(unsigned long nr,
+				  __const__ unsigned long *addr)
+{
+	__const__ unsigned char	*tmp = (__const__ unsigned char *) addr;
+	return (tmp[nr >> 3] >> (nr & 7)) & 1;
+}
+
+#define __set_le_bit(nr, addr) \
+	__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __clear_le_bit(nr, addr) \
+	__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define test_and_set_le_bit(nr, addr) \
+	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define test_and_clear_le_bit(nr, addr) \
+	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define __test_and_set_le_bit(nr, addr) \
+	__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __test_and_clear_le_bit(nr, addr) \
+	__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0)
+unsigned long find_next_zero_le_bit(const unsigned long *addr,
+				    unsigned long size, unsigned long offset);
+
+/* Bitmap functions for the ext2 filesystem */
+
+#define ext2_set_bit(nr,addr) \
+	__test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit(nr, addr) \
+	__test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+	test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+	test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_test_bit(nr, addr)      test_le_bit((nr),(unsigned long*)addr)
+
+#define ext2_find_first_zero_bit(addr, size) \
+	find_first_zero_le_bit((unsigned long*)addr, size)
+#define ext2_find_next_zero_bit(addr, size, off) \
+	find_next_zero_le_bit((unsigned long*)addr, size, off)
+
+/* Bitmap functions for the minix filesystem. */
+
+#define minix_test_and_set_bit(nr,addr) \
+	__test_and_set_le_bit(nr, (unsigned long *)addr)
+#define minix_set_bit(nr,addr) \
+	__set_le_bit(nr, (unsigned long *)addr)
+#define minix_test_and_clear_bit(nr,addr) \
+	__test_and_clear_le_bit(nr, (unsigned long *)addr)
+#define minix_test_bit(nr,addr) \
+	test_le_bit(nr, (unsigned long *)addr)
+
+#define minix_find_first_zero_bit(addr,size) \
+	find_first_zero_le_bit((unsigned long *)addr, size)
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#ifdef CONFIG_PPC64
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 64;
+	return __ffs(b[2]) + 128;
+#else
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 32;
+	if (unlikely(b[2]))
+		return __ffs(b[2]) + 64;
+	if (b[3])
+		return __ffs(b[3]) + 96;
+	return __ffs(b[4]) + 128;
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_BITOPS_H */
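The __ilog2()/ffz()/__ffs() routines above all lean on the same identity: x & -x isolates the least significant 1-bit, and a count-leading-zeros instruction turns that one-bit mask into an index. A standalone sketch of the same arithmetic (hypothetical userspace test harness, not part of this commit; GCC's __builtin_clzl stands in for cntlzd/cntlzw):

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Same math as __ilog2() above, with the GCC builtin replacing
 * the cntlzd/cntlzw instructions. */
static int ilog2_ref(unsigned long x)
{
	return BITS_PER_LONG - 1 - __builtin_clzl(x);
}

/* ffz(): index of the least significant 0 bit. */
static unsigned long ffz_ref(unsigned long x)
{
	if ((x = ~x) == 0)		/* no zero bit anywhere */
		return BITS_PER_LONG;
	return ilog2_ref(x & -x);	/* x & -x keeps only the lowest 1-bit */
}

int main(void)
{
	assert(ffz_ref(0x0UL) == 0);	/* bit 0 already clear */
	assert(ffz_ref(0x1UL) == 1);	/* lowest clear bit is bit 1 */
	assert(ffz_ref(0xffUL) == 8);	/* low byte fully set */
	assert(ffz_ref(~0UL) == BITS_PER_LONG);
	return 0;
}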
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index e4d028e87020..d625ee55f957 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -12,20 +12,16 @@
 #ifndef __ASSEMBLY__
 
 #ifdef __powerpc64__
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
-#define DATA_TYPE long long
+#define BUG_TABLE_ENTRY		".llong"
+#define BUG_TRAP_OP	"tdnei"
 #else
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".long " #label ", " #line ", " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
-#define DATA_TYPE int
+#define BUG_TABLE_ENTRY		".long"
+#define BUG_TRAP_OP	"twnei"
 #endif /* __powerpc64__ */
 
 struct bug_entry {
 	unsigned long bug_addr;
-	int line;
+	long line;
 	const char *file;
 	const char *function;
 };
@@ -43,29 +39,29 @@ struct bug_entry *find_bug(unsigned long bugaddr);
 #define BUG() do {							 \
 	__asm__ __volatile__(						 \
 		"1:	twi 31,0,0\n"					 \
-		".section __bug_table,\"a\"\n\t"			 \
-		BUG_TABLE_ENTRY(1b,%0,%1,%2)				 \
+		".section __bug_table,\"a\"\n"				 \
+		"\t"BUG_TABLE_ENTRY"	1b,%0,%1,%2\n"			 \
 		".previous"						 \
 		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define BUG_ON(x) do {						\
 	__asm__ __volatile__(					\
-		TRAP_OP(%0,0)					\
-		".section __bug_table,\"a\"\n\t"		\
-		BUG_TABLE_ENTRY(1b,%1,%2,%3)			\
+		"1:	"BUG_TRAP_OP"	%0,0\n"			\
+		".section __bug_table,\"a\"\n"			\
+		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n"		\
 		".previous"					\
-		: : "r" ((DATA_TYPE)(x)), "i" (__LINE__),	\
+		: : "r" ((long)(x)), "i" (__LINE__),		\
 		    "i" (__FILE__), "i" (__FUNCTION__));	\
 } while (0)
 
 #define WARN_ON(x) do {						\
 	__asm__ __volatile__(					\
-		TRAP_OP(%0,0)					\
-		".section __bug_table,\"a\"\n\t"		\
-		BUG_TABLE_ENTRY(1b,%1,%2,%3)			\
+		"1:	"BUG_TRAP_OP"	%0,0\n"			\
+		".section __bug_table,\"a\"\n"			\
+		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n"		\
 		".previous"					\
-		: : "r" ((DATA_TYPE)(x)),			\
+		: : "r" ((long)(x)),				\
 		    "i" (__LINE__ + BUG_WARNING_TRAP),		\
 		    "i" (__FILE__), "i" (__FUNCTION__));	\
 } while (0)
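The table entries that BUG()/BUG_ON()/WARN_ON() emit into __bug_table are searched by trap address when the exception handler runs; find_bug() is declared in the hunk header above. A minimal sketch of how such a lookup can walk the section (the __start___bug_table/__stop___bug_table bounds are assumed linker-script symbols, not shown in this diff):

/* Hypothetical sketch: scan the __bug_table section populated by the
 * macros above.  Assumes linker-provided section bounds. */
extern struct bug_entry __start___bug_table[], __stop___bug_table[];

struct bug_entry *find_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug->bug_addr)
			return bug;
	return NULL;
}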
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index d22b10021b5d..d140577d0a05 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -178,18 +178,22 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
 static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
 					  struct pt_regs *regs)
 {
-	int i;
-	int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
-
-	if (gprs > ELF_NGREG)
-		gprs = ELF_NGREG;
-
-	for (i=0; i < gprs; i++)
-		elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
-
-	memset((char *)(elf_regs) + sizeof(struct pt_regs), 0, \
-	       sizeof(elf_gregset_t) - sizeof(struct pt_regs));
+	int i, nregs;
+
+	memset((void *)elf_regs, 0, sizeof(elf_gregset_t));
+
+	/* Our registers are always unsigned longs, whether we're a 32 bit
+	 * process or 64 bit, on either a 64 bit or 32 bit kernel.
+	 * Don't use ELF_GREG_TYPE here. */
+	nregs = sizeof(struct pt_regs) / sizeof(unsigned long);
+	if (nregs > ELF_NGREG)
+		nregs = ELF_NGREG;
 
+	for (i = 0; i < nregs; i++) {
+		/* This will correctly truncate 64 bit registers to 32 bits
+		 * for a 32 bit process on a 64 bit kernel. */
+		elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
+	}
 }
 #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
 
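The truncation described in the new comments is plain integer narrowing: each pt_regs slot is an unsigned long, and the (elf_greg_t) cast keeps the low 32 bits that a 32-bit process expects in its core dump. A standalone illustration (hypothetical values; fixed-width types used so the sketch compiles anywhere):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg64 = 0x1234567890abcdefULL;	/* register image on a 64-bit kernel */
	uint32_t greg32 = (uint32_t)reg64;	/* 32-bit elf_greg_t slot */

	assert(greg32 == 0x90abcdefU);		/* low word survives, high word dropped */
	return 0;
}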
diff --git a/include/asm-ppc64/futex.h b/include/asm-powerpc/futex.h
index 266b460de44e..37c94e52ab6d 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_POWERPC_FUTEX_H
+#define _ASM_POWERPC_FUTEX_H
 
 #ifdef __KERNEL__
 
@@ -7,28 +7,29 @@
 #include <asm/errno.h>
 #include <asm/synch.h>
 #include <asm/uaccess.h>
+#include <asm/ppc_asm.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
-  __asm__ __volatile (SYNC_ON_SMP \
-"1:	lwarx	%0,0,%2\n" \
-	insn \
-"2:	stwcx.	%1,0,%2\n\
-	bne-	1b\n\
-	li	%1,0\n\
-3:	.section .fixup,\"ax\"\n\
-4:	li	%1,%3\n\
-	b	3b\n\
-	.previous\n\
-	.section __ex_table,\"a\"\n\
-	.align 3\n\
-	.llong	1b,4b,2b,4b\n\
-	.previous" \
-	: "=&r" (oldval), "=&r" (ret) \
-	: "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+  __asm__ __volatile ( \
+	SYNC_ON_SMP \
+"1:	lwarx	%0,0,%2\n" \
+	insn \
+"2:	stwcx.	%1,0,%2\n" \
+	"bne-	1b\n" \
+	"li	%1,0\n" \
+"3:	.section .fixup,\"ax\"\n" \
+"4:	li	%1,%3\n" \
+	"b	3b\n" \
+	".previous\n" \
+	".section __ex_table,\"a\"\n" \
+	".align 3\n" \
+	DATAL "	1b,4b,2b,4b\n" \
+	".previous" \
+	: "=&r" (oldval), "=&r" (ret) \
+	: "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
 	: "cr0", "memory")
 
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -79,5 +80,5 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-#endif
-#endif
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_FUTEX_H */
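The two shifts at the end of the hunk begin the standard futex operation decoding: encoded_op packs op:4 | cmp:4 | oparg:12 | cmparg:12 from the top bit down, and the two 12-bit arguments are recovered with sign-extending shifts. A userspace sketch of that unpacking (hypothetical example value; the oparg/cmparg lines follow the generic futex encoding, which this excerpt truncates before showing):

#include <stdio.h>

int main(void)
{
	int encoded_op = 0x12345678;			/* arbitrary example */
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;		/* sign-extends, as the kernel does */
	int cmparg = (encoded_op << 20) >> 20;

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}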
diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h
index 5b94ff489b8b..279a6229584b 100644
--- a/include/asm-powerpc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
@@ -62,6 +62,9 @@
 # define TIOCM_DSR	0x100
 # define TIOCM_CD	TIOCM_CAR
 # define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
 
 #define TIOCGSOFTCAR	0x5419
 #define TIOCSSOFTCAR	0x541A
diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h
new file mode 100644
index 000000000000..2c3e1d94db1d
--- /dev/null
+++ b/include/asm-powerpc/ipcbuf.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_POWERPC_IPCBUF_H
+#define _ASM_POWERPC_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for the powerpc is identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
+ * kernel.  Note extra padding because this structure is passed back
+ * and forth between kernel and user space.  Pad space is left for:
+ *	- 1 32-bit value to fill up for 8-byte alignment
+ *	- 2 miscellaneous 64-bit values
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+struct ipc64_perm
+{
+	__kernel_key_t	key;
+	__kernel_uid_t	uid;
+	__kernel_gid_t	gid;
+	__kernel_uid_t	cuid;
+	__kernel_gid_t	cgid;
+	__kernel_mode_t	mode;
+	unsigned int	seq;
+	unsigned int	__pad1;
+	unsigned long long __unused1;
+	unsigned long long __unused2;
+};
+
+#endif /* _ASM_POWERPC_IPCBUF_H */
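The padding story in the comment can be checked mechanically: eight 32-bit fields pack to 32 bytes, and the two reserved 64-bit values bring the total to 48 bytes with 8-byte alignment on both word sizes. A standalone sketch (the __kernel_* typedefs are modelled as 32-bit stand-ins here, which is exactly what the comment asserts):

#include <assert.h>
#include <stdint.h>

struct ipc64_perm_model {		/* mirror of the layout above */
	int32_t  key;
	uint32_t uid, gid, cuid, cgid, mode;
	uint32_t seq;
	uint32_t pad1;			/* fills out to 8-byte alignment */
	uint64_t unused1, unused2;	/* the two reserved 64-bit slots */
};

int main(void)
{
	assert(sizeof(struct ipc64_perm_model) == 48);
	return 0;
}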
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index c7c3f912a3c2..b3935ea28fff 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -73,7 +73,7 @@ extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
 #define IC_INVALID	0
 #define IC_OPEN_PIC	1
 #define IC_PPC_XIC	2
-#define IC_BPA_IIC	3
+#define IC_CELL_PIC	3
 #define IC_ISERIES	4
 
 extern u64 ppc64_interrupt_controller;
diff --git a/include/asm-ppc64/iSeries/HvCall.h b/include/asm-powerpc/iseries/hv_call.h
index c3f19475c0d9..e9f831c9a5e5 100644
--- a/include/asm-ppc64/iSeries/HvCall.h
+++ b/include/asm-powerpc/iseries/hv_call.h
@@ -20,11 +20,11 @@
  * This file contains the "hypervisor call" interface which is used to
  * drive the hypervisor from the OS.
  */
-#ifndef _HVCALL_H
-#define _HVCALL_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_H
 
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
 #include <asm/paca.h>
 
 /* Type of yield for HvCallBaseYieldProcessor */
@@ -110,4 +110,4 @@ static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
 	HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
 }
 
-#endif /* _HVCALL_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
diff --git a/include/asm-ppc64/iSeries/HvCallEvent.h b/include/asm-powerpc/iseries/hv_call_event.h
index 5d9a327d0122..46763a30590a 100644
--- a/include/asm-ppc64/iSeries/HvCallEvent.h
+++ b/include/asm-powerpc/iseries/hv_call_event.h
@@ -20,11 +20,11 @@
  * This file contains the "hypervisor call" interface which is used to
  * drive the hypervisor from the OS.
  */
-#ifndef _HVCALLEVENT_H
-#define _HVCALLEVENT_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
 
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
 #include <asm/abs_addr.h>
 
 struct HvLpEvent;
@@ -250,4 +250,4 @@ static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
 	return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir);
 }
 
-#endif /* _HVCALLEVENT_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
diff --git a/include/asm-ppc64/iSeries/HvCallSc.h b/include/asm-powerpc/iseries/hv_call_sc.h
index a62cef3822f9..dec7e9d9ab78 100644
--- a/include/asm-ppc64/iSeries/HvCallSc.h
+++ b/include/asm-powerpc/iseries/hv_call_sc.h
@@ -16,8 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#ifndef _HVCALLSC_H
-#define _HVCALLSC_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
 
 #include <linux/types.h>
 
@@ -48,4 +48,4 @@ extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
 extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
 extern u64 HvCall7Ret16(u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64);
 
-#endif /* _HVCALLSC_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
diff --git a/include/asm-ppc64/iSeries/HvCallXm.h b/include/asm-powerpc/iseries/hv_call_xm.h
index 8b9ba608daaf..ca9202cb01ed 100644
--- a/include/asm-ppc64/iSeries/HvCallXm.h
+++ b/include/asm-powerpc/iseries/hv_call_xm.h
@@ -2,11 +2,11 @@
  * This file contains the "hypervisor call" interface which is used to
  * drive the hypervisor from SLIC.
  */
-#ifndef _HVCALLXM_H
-#define _HVCALLXM_H
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
 
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
 
 #define HvCallXmGetTceTableParms	HvCallXm + 0
 #define HvCallXmTestBus			HvCallXm + 1
@@ -75,4 +75,4 @@ static inline u64 HvCallXm_loadTod(void)
 	return HvCall0(HvCallXmLoadTod);
 }
 
-#endif /* _HVCALLXM_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/include/asm-ppc64/iSeries/HvLpConfig.h b/include/asm-powerpc/iseries/hv_lp_config.h
index f1cf1e70ca3c..bc00f036bca0 100644
--- a/include/asm-ppc64/iSeries/HvLpConfig.h
+++ b/include/asm-powerpc/iseries/hv_lp_config.h
@@ -16,17 +16,17 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#ifndef _HVLPCONFIG_H
-#define _HVLPCONFIG_H
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
+#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
 
 /*
  * This file contains the interface to the LPAR configuration data
  * to determine which resources should be allocated to each partition.
  */
 
-#include <asm/iSeries/HvCallSc.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/ItLpNaca.h>
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/it_lp_naca.h>
 
 enum {
 	HvCallCfg_Cur	= 0,
@@ -135,4 +135,4 @@ static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
 	return HvCall1(HvCallCfgGetHostingLpIndex, lp);
 }
 
-#endif /* _HVLPCONFIG_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
diff --git a/include/asm-ppc64/iSeries/HvLpEvent.h b/include/asm-powerpc/iseries/hv_lp_event.h
index 865000de79b6..499ab1ad0185 100644
--- a/include/asm-ppc64/iSeries/HvLpEvent.h
+++ b/include/asm-powerpc/iseries/hv_lp_event.h
@@ -19,13 +19,13 @@
 
 /* This file contains the class for HV events in the system. */
 
-#ifndef _HVLPEVENT_H
-#define _HVLPEVENT_H
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
 
 #include <asm/types.h>
 #include <asm/ptrace.h>
-#include <asm/iSeries/HvTypes.h>
-#include <asm/iSeries/HvCallEvent.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
 
 /*
  * HvLpEvent is the structure for Lp Event messages passed between
@@ -139,4 +139,4 @@ extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
 #define HvLpDma_Rc_InvalidAddress	4
 #define HvLpDma_Rc_InvalidLength	5
 
-#endif /* _HVLPEVENT_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
diff --git a/include/asm-ppc64/iSeries/HvTypes.h b/include/asm-powerpc/iseries/hv_types.h
index b1ef2b4cb3e3..c38f7e3d01dc 100644
--- a/include/asm-ppc64/iSeries/HvTypes.h
+++ b/include/asm-powerpc/iseries/hv_types.h
@@ -16,8 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#ifndef _HVTYPES_H
-#define _HVTYPES_H
+#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
+#define _ASM_POWERPC_ISERIES_HV_TYPES_H
 
 /*
  * General typedefs for the hypervisor.
@@ -110,4 +110,4 @@ struct HvLpBufferList {
 	u64 len;
 };
 
-#endif /* _HVTYPES_H */
+#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
diff --git a/include/asm-ppc64/iSeries/iSeries_io.h b/include/asm-powerpc/iseries/iseries_io.h
index 9f79413342b3..56b2113ff0f5 100644
--- a/include/asm-ppc64/iSeries/iSeries_io.h
+++ b/include/asm-powerpc/iseries/iseries_io.h
@@ -1,5 +1,5 @@
-#ifndef _ISERIES_IO_H
-#define _ISERIES_IO_H
+#ifndef _ASM_POWERPC_ISERIES_ISERIES_IO_H
+#define _ASM_POWERPC_ISERIES_ISERIES_IO_H
 
 #include <linux/config.h>
 
@@ -46,4 +46,4 @@ extern void iSeries_memcpy_fromio(void *dest,
 			const volatile void __iomem *source, size_t n);
 
 #endif /* CONFIG_PPC_ISERIES */
-#endif /* _ISERIES_IO_H */
+#endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
diff --git a/include/asm-ppc64/iSeries/ItExtVpdPanel.h b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
index 4c546a8802b4..66a17a230c52 100644
--- a/include/asm-ppc64/iSeries/ItExtVpdPanel.h
+++ b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
@@ -16,8 +16,8 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#ifndef _ITEXTVPDPANEL_H 19#ifndef _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
20#define _ITEXTVPDPANEL_H 20#define _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
21 21
22/* 22/*
23 * This struct maps the panel information 23 * This struct maps the panel information
@@ -49,4 +49,4 @@ struct ItExtVpdPanel {
49 49
50extern struct ItExtVpdPanel xItExtVpdPanel; 50extern struct ItExtVpdPanel xItExtVpdPanel;
51 51
52#endif /* _ITEXTVPDPANEL_H */ 52#endif /* _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H */
diff --git a/include/asm-ppc64/iSeries/ItLpNaca.h b/include/asm-powerpc/iseries/it_lp_naca.h
index 225d0176779d..c3ef1de45d82 100644
--- a/include/asm-ppc64/iSeries/ItLpNaca.h
+++ b/include/asm-powerpc/iseries/it_lp_naca.h
@@ -16,8 +16,8 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#ifndef _ITLPNACA_H 19#ifndef _ASM_POWERPC_ISERIES_IT_LP_NACA_H
20#define _ITLPNACA_H 20#define _ASM_POWERPC_ISERIES_IT_LP_NACA_H
21 21
22#include <linux/types.h> 22#include <linux/types.h>
23 23
@@ -77,4 +77,4 @@ struct ItLpNaca {
77 77
78extern struct ItLpNaca itLpNaca; 78extern struct ItLpNaca itLpNaca;
79 79
80#endif /* _ITLPNACA_H */ 80#endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */
diff --git a/include/asm-ppc64/iSeries/ItLpQueue.h b/include/asm-powerpc/iseries/it_lp_queue.h
index 69b26ad74135..a60d03afbf95 100644
--- a/include/asm-ppc64/iSeries/ItLpQueue.h
+++ b/include/asm-powerpc/iseries/it_lp_queue.h
@@ -16,8 +16,8 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#ifndef _ITLPQUEUE_H 19#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
20#define _ITLPQUEUE_H 20#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
21 21
22/* 22/*
23 * This control block defines the simple LP queue structure that is 23 * This control block defines the simple LP queue structure that is
@@ -78,4 +78,4 @@ extern int hvlpevent_is_pending(void);
78extern void process_hvlpevents(struct pt_regs *); 78extern void process_hvlpevents(struct pt_regs *);
79extern void setup_hvlpevent_queue(void); 79extern void setup_hvlpevent_queue(void);
80 80
81#endif /* _ITLPQUEUE_H */ 81#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
diff --git a/include/asm-ppc64/iSeries/ItLpRegSave.h b/include/asm-powerpc/iseries/it_lp_reg_save.h
index 1b3087e76205..288044b702de 100644
--- a/include/asm-ppc64/iSeries/ItLpRegSave.h
+++ b/include/asm-powerpc/iseries/it_lp_reg_save.h
@@ -16,8 +16,8 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#ifndef _ITLPREGSAVE_H 19#ifndef _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
20#define _ITLPREGSAVE_H 20#define _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
21 21
22/* 22/*
23 * This control block contains the data that is shared between PLIC 23 * This control block contains the data that is shared between PLIC
diff --git a/include/asm-ppc64/iSeries/LparMap.h b/include/asm-powerpc/iseries/lpar_map.h
index a6840b186d03..84fc321615bf 100644
--- a/include/asm-ppc64/iSeries/LparMap.h
+++ b/include/asm-powerpc/iseries/lpar_map.h
@@ -16,8 +16,8 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#ifndef _LPARMAP_H 19#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
20#define _LPARMAP_H 20#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
21 21
22#ifndef __ASSEMBLY__ 22#ifndef __ASSEMBLY__
23 23
@@ -80,4 +80,4 @@ extern const struct LparMap xLparMap;
80/* the fixed address where the LparMap exists */ 80/* the fixed address where the LparMap exists */
81#define LPARMAP_PHYS 0x7000 81#define LPARMAP_PHYS 0x7000
82 82
83#endif /* _LPARMAP_H */ 83#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
diff --git a/include/asm-ppc64/iSeries/mf.h b/include/asm-powerpc/iseries/mf.h
index 7e6a0d936999..e7bd57a03fb1 100644
--- a/include/asm-ppc64/iSeries/mf.h
+++ b/include/asm-powerpc/iseries/mf.h
@@ -23,13 +23,13 @@
23 * along with this program; if not, write to the Free Software 23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 25 */
26#ifndef _ASM_PPC64_ISERIES_MF_H 26#ifndef _ASM_POWERPC_ISERIES_MF_H
27#define _ASM_PPC64_ISERIES_MF_H 27#define _ASM_POWERPC_ISERIES_MF_H
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30 30
31#include <asm/iSeries/HvTypes.h> 31#include <asm/iseries/hv_types.h>
32#include <asm/iSeries/HvCallEvent.h> 32#include <asm/iseries/hv_call_event.h>
33 33
34struct rtc_time; 34struct rtc_time;
35 35
@@ -54,4 +54,4 @@ extern int mf_get_rtc(struct rtc_time *tm);
54extern int mf_get_boot_rtc(struct rtc_time *tm); 54extern int mf_get_boot_rtc(struct rtc_time *tm);
55extern int mf_set_rtc(struct rtc_time *tm); 55extern int mf_set_rtc(struct rtc_time *tm);
56 56
57#endif /* _ASM_PPC64_ISERIES_MF_H */ 57#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/include/asm-ppc64/iSeries/vio.h b/include/asm-powerpc/iseries/vio.h
index 6c05e6257f53..7e3a469420dd 100644
--- a/include/asm-ppc64/iSeries/vio.h
+++ b/include/asm-powerpc/iseries/vio.h
@@ -38,11 +38,11 @@
38 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 38 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 * 39 *
40 */ 40 */
41#ifndef _ISERIES_VIO_H 41#ifndef _ASM_POWERPC_ISERIES_VIO_H
42#define _ISERIES_VIO_H 42#define _ASM_POWERPC_ISERIES_VIO_H
43 43
44#include <asm/iSeries/HvTypes.h> 44#include <asm/iseries/hv_types.h>
45#include <asm/iSeries/HvLpEvent.h> 45#include <asm/iseries/hv_lp_event.h>
46 46
47/* 47/*
48 * iSeries virtual I/O events use the subtype field in 48 * iSeries virtual I/O events use the subtype field in
@@ -127,4 +127,4 @@ struct device;
127 127
128extern struct device *iSeries_vio_dev; 128extern struct device *iSeries_vio_dev;
129 129
130#endif /* _ISERIES_VIO_H */ 130#endif /* _ASM_POWERPC_ISERIES_VIO_H */
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
new file mode 100644
index 000000000000..062ab9ba68eb
--- /dev/null
+++ b/include/asm-powerpc/kexec.h
@@ -0,0 +1,49 @@
+#ifndef _ASM_POWERPC_KEXEC_H
+#define _ASM_POWERPC_KEXEC_H
+
+/*
+ * Maximum page that is mapped directly into kernel memory.
+ * XXX: Since we copy virt we can use any page we allocate
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/*
+ * Maximum address we can reach in physical address mode.
+ * XXX: I want to allow initrd in highmem.  Otherwise set to rmo on LPAR.
+ */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#ifdef __powerpc64__
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+#else
+/* TASK_SIZE, probably left over from use_mm ?? */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#endif
+
+#define KEXEC_CONTROL_CODE_SIZE 4096
+
+/* The native architecture */
+#ifdef __powerpc64__
+#define KEXEC_ARCH KEXEC_ARCH_PPC64
+#else
+#define KEXEC_ARCH KEXEC_ARCH_PPC
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define MAX_NOTE_BYTES 1024
+typedef u32 note_buf_t[MAX_NOTE_BYTES / sizeof(u32)];
+
+extern note_buf_t crash_notes[];
+
+#ifdef __powerpc64__
+extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
+					   master to copy new code to 0 */
+#else
+struct kimage;
+extern void machine_kexec_simple(struct kimage *image);
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 451b345cfc78..629ca964b974 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -80,6 +80,7 @@ struct machdep_calls {
 	void		(*iommu_dev_setup)(struct pci_dev *dev);
 	void		(*iommu_bus_setup)(struct pci_bus *bus);
 	void		(*irq_bus_setup)(struct pci_bus *bus);
+	int		(*set_dabr)(unsigned long dabr);
 #endif
 
 	int		(*probe)(int platform);
diff --git a/include/asm-powerpc/numnodes.h b/include/asm-powerpc/numnodes.h
new file mode 100644
index 000000000000..795533aca095
--- /dev/null
+++ b/include/asm-powerpc/numnodes.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_POWERPC_MAX_NUMNODES_H
+#define _ASM_POWERPC_MAX_NUMNODES_H
+
+/* Max 16 Nodes */
+#define NODES_SHIFT	4
+
+#endif /* _ASM_POWERPC_MAX_NUMNODES_H */
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index f99f2af82ca5..c534ca41224b 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -506,6 +506,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #else
 	#define __ASM_CONST(x)	x##UL
 	#define ASM_CONST(x)	__ASM_CONST(x)
+
+#ifdef CONFIG_PPC64
+#define DATAL	".llong"
+#else
+#define DATAL	".long"
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index eee954a001fd..1dc4bf7b52b3 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -70,7 +70,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
 #define PLATFORM_LPAR		0x0001
 #define PLATFORM_POWERMAC	0x0400
 #define PLATFORM_MAPLE		0x0500
-#define PLATFORM_BPA		0x1000
+#define PLATFORM_CELL		0x1000
 
 /* Compatibility with drivers coming from PPC32 world */
 #define _machine (systemcfg->platform)
diff --git a/include/asm-ppc64/ptrace.h b/include/asm-powerpc/ptrace.h
index 3a55377f1fd3..1f7ecdb0b6ce 100644
--- a/include/asm-ppc64/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_PTRACE_H
-#define _PPC64_PTRACE_H
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
 
 /*
  * Copyright (C) 2001 PPC64 Team, IBM Corp
@@ -16,7 +16,7 @@
  * that the overall structure is a multiple of 16 bytes in length.
  *
  * Note that the offsets of the fields in this struct correspond with
- * the PT_* values below.  This simplifies arch/ppc64/kernel/ptrace.c.
+ * the PT_* values below.  This simplifies arch/powerpc/kernel/ptrace.c.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -30,70 +30,96 @@ struct pt_regs {
 	unsigned long gpr[32];
 	unsigned long nip;
 	unsigned long msr;
 	unsigned long orig_gpr3;	/* Used for restarting system calls */
 	unsigned long ctr;
 	unsigned long link;
 	unsigned long xer;
 	unsigned long ccr;
-	unsigned long softe;		/* Soft enabled/disabled */
-	unsigned long trap;		/* Reason for being here */
-	unsigned long dar;		/* Fault registers */
-	unsigned long dsisr;
-	unsigned long result;		/* Result of a system call */
+#ifdef __powerpc64__
+	unsigned long softe;		/* Soft enabled/disabled */
+#else
+	unsigned long mq;		/* 601 only (not used at present) */
+					/* Used on APUS to hold IPL value. */
+#endif
+	unsigned long trap;		/* Reason for being here */
+	/* N.B. for critical exceptions on 4xx, the dar and dsisr
+	   fields are overloaded to hold srr0 and srr1. */
+	unsigned long dar;		/* Fault registers */
+	unsigned long dsisr;		/* on 4xx/Book-E used for ESR */
+	unsigned long result;		/* Result of a system call */
 };
 
-struct pt_regs32 {
-	unsigned int gpr[32];
-	unsigned int nip;
-	unsigned int msr;
-	unsigned int orig_gpr3;		/* Used for restarting system calls */
-	unsigned int ctr;
-	unsigned int link;
-	unsigned int xer;
-	unsigned int ccr;
-	unsigned int mq;		/* 601 only (not used at present) */
-	unsigned int trap;		/* Reason for being here */
-	unsigned int dar;		/* Fault registers */
-	unsigned int dsisr;
-	unsigned int result;		/* Result of a system call */
-};
+#endif /* __ASSEMBLY__ */
 
 #ifdef __KERNEL__
 
-#define instruction_pointer(regs) ((regs)->nip)
+#ifdef __powerpc64__
+
+#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE	128
+#define __SIGNAL_FRAMESIZE32	64
+
+#else /* __powerpc64__ */
+
+#define STACK_FRAME_OVERHEAD	16	/* size of minimum stack frame */
+
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE	64
+
+#endif /* __powerpc64__ */
 
+#ifndef __ASSEMBLY__
+
+#define instruction_pointer(regs) ((regs)->nip)
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
 #else
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
 
+#ifdef __powerpc64__
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+#else
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+#endif
 
 #define force_successful_syscall_return()	\
-	(current_thread_info()->syscall_noerror = 1)
+	do { \
+		current_thread_info()->syscall_noerror = 1; \
+	} while(0)
 
 /*
  * We use the least-significant bit of the trap field to indicate
  * whether we have saved the full set of registers, or only a
  * partial set.  A 1 there means the partial set.
+ * On 4xx we use the next bit to indicate whether the exception
+ * is a critical exception (1 means it is).
  */
 #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
+#ifndef __powerpc64__
+#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) == 0)
+#endif /* ! __powerpc64__ */
 #define TRAP(regs)		((regs)->trap & ~0xF)
+#ifdef __powerpc64__
 #define CHECK_FULL_REGS(regs)	BUG_ON(regs->trap & 1)
-
-#endif /* __KERNEL__ */
+#else
+#define CHECK_FULL_REGS(regs)						      \
+do {									      \
+	if ((regs)->trap & 1)						      \
+		printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
+} while (0)
+#endif /* __powerpc64__ */
 
 #endif /* __ASSEMBLY__ */
 
-#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
-
-/* Size of dummy stack frame allocated when calling signal handler. */
-#define __SIGNAL_FRAMESIZE	128
-#define __SIGNAL_FRAMESIZE32	64
+#endif /* __KERNEL__ */
 
 /*
  * Offsets used by 'ptrace' system call interface.
+ * These can't be changed without breaking binary compatibility
+ * with MkLinux, etc.
  */
 #define PT_R0	0
 #define PT_R1	1
@@ -137,18 +163,25 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define PT_LNK	36
 #define PT_XER	37
 #define PT_CCR	38
+#ifndef __powerpc64__
+#define PT_MQ	39
+#else
 #define PT_SOFTE 39
 #define PT_TRAP	40
 #define PT_DAR	41
 #define PT_DSISR 42
 #define PT_RESULT 43
+#endif
 
-#define PT_FPR0	48
+#define PT_FPR0	48	/* each FP reg occupies 2 slots in this space */
+
+#ifndef __powerpc64__
+
+#define PT_FPR31 (PT_FPR0 + 2*31)
+#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
+
+#else /* __powerpc64__ */
 
-/*
- * Kernel and userspace will both use this PT_FPSCR value.  32-bit apps will
- * have visibility to the asm-ppc/ptrace.h header instead of this one.
- */
 #define PT_FPSCR (PT_FPR0 + 32)	/* each FP reg occupies 1 slot in 64-bit space */
 
 #ifdef __KERNEL__
@@ -165,29 +198,29 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define PT_VRSAVE_32 (PT_VR0 + 33*4)
 #endif
 
+#endif /* __powerpc64__ */
+
 /*
  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
  * The transfer totals 34 quadword.  Quadwords 0-31 contain the
  * corresponding vector registers.  Quadword 32 contains the vscr as the
  * last word (offset 12) within that quadword.  Quadword 33 contains the
  * vrsave as the first word (offset 0) within the quadword.
  *
  * This definition of the VMX state is compatible with the current PPC32
  * ptrace interface.  This allows signal handling and ptrace to use the same
  * structures.  This also simplifies the implementation of a bi-arch
  * (combined 32- and 64-bit) gdb.
  */
 #define PTRACE_GETVRREGS	18
 #define PTRACE_SETVRREGS	19
 
-/*
- * While we dont have 64bit book E processors, we need to reserve the
- * relevant ptrace calls for 32bit compatibility.
- */
-#if 0
-#define PTRACE_GETEVRREGS	20
-#define PTRACE_SETEVRREGS	21
-#endif
+#ifndef __powerpc64__
+/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
+ * spefscr, in one go */
+#define PTRACE_GETEVRREGS	20
+#define PTRACE_SETEVRREGS	21
+#endif /* __powerpc64__ */
 
 /*
  * Get or set a debug register. The first 16 are DABR registers and the
@@ -196,6 +229,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define PTRACE_GET_DEBUGREG	25
 #define PTRACE_SET_DEBUGREG	26
 
+#ifdef __powerpc64__
 /* Additional PTRACE requests implemented on PowerPC. */
 #define PPC_PTRACE_GETREGS	0x99	/* Get GPRs 0 - 31 */
 #define PPC_PTRACE_SETREGS	0x98	/* Set GPRs 0 - 31 */
@@ -209,5 +243,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
209#define PPC_PTRACE_POKEDATA_3264 0x92 243#define PPC_PTRACE_POKEDATA_3264 0x92
210#define PPC_PTRACE_PEEKUSR_3264 0x91 244#define PPC_PTRACE_PEEKUSR_3264 0x91
211#define PPC_PTRACE_POKEUSR_3264 0x90 245#define PPC_PTRACE_POKEUSR_3264 0x90
246#endif /* __powerpc64__ */
212 247
213#endif /* _PPC64_PTRACE_H */ 248#endif /* _ASM_POWERPC_PTRACE_H */
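The 34-quadword layout in the comment above maps onto a plain C struct. A userspace sketch (not part of this commit; struct and function names are illustrative, and the request is passed numerically in case the day's libc headers lack PTRACE_GETVRREGS):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* Quadwords 0-31: vr0-vr31; quadword 32: vscr in the last word
     * (offset 12); quadword 33: vrsave in the first word (offset 0). */
    struct vrregs {
            uint32_t vr[32][4];
            uint32_t pad32[3];
            uint32_t vscr;
            uint32_t vrsave;
            uint32_t pad33[3];
    };

    int dump_vrsave(pid_t pid)
    {
            struct vrregs regs;

            if (ptrace(18 /* PTRACE_GETVRREGS */, pid, NULL, &regs) < 0)
                    return -1;
            printf("vrsave = 0x%08x\n", regs.vrsave);
            return 0;
    }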
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 2c050332471d..d1bb611ea626 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -149,28 +149,11 @@ struct rtas_error_log {
149 unsigned char buffer[1]; 149 unsigned char buffer[1];
150}; 150};
151 151
152struct flash_block { 152/*
153 char *data; 153 * This can be set by the rtas_flash module so that it can get called
154 unsigned long length; 154 * as the very last thing before the kernel terminates.
155};
156
157/* This struct is very similar but not identical to
158 * that needed by the rtas flash update.
159 * All we need to do for rtas is rewrite num_blocks
160 * into a version/length and translate the pointers
161 * to absolute.
162 */ 155 */
163#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block)) 156extern void (*rtas_flash_term_hook)(int);
164struct flash_block_list {
165 unsigned long num_blocks;
166 struct flash_block_list *next;
167 struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
168};
169struct flash_block_list_header { /* just the header of flash_block_list */
170 unsigned long num_blocks;
171 struct flash_block_list *next;
172};
173extern struct flash_block_list_header rtas_firmware_flash_list;
174 157
175extern struct rtas_t rtas; 158extern struct rtas_t rtas;
176 159
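A hedged sketch of how the hook above is meant to be used: a module stores its callback in rtas_flash_term_hook so it runs as the machine goes down. Everything except the hook itself is illustrative:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <asm/rtas.h>

    /* Called with the shutdown code as the very last kernel action. */
    static void example_flash_term(int term_code)
    {
            /* hand the staged firmware image to RTAS here */
    }

    static int __init example_flash_init(void)
    {
            rtas_flash_term_hook = example_flash_term;
            return 0;
    }

    static void __exit example_flash_exit(void)
    {
            rtas_flash_term_hook = NULL;
    }

    module_init(example_flash_init);
    module_exit(example_flash_exit);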
diff --git a/include/asm-ppc64/sigcontext.h b/include/asm-powerpc/sigcontext.h
index 6f8aee768c5e..165d630e1cf3 100644
--- a/include/asm-ppc64/sigcontext.h
+++ b/include/asm-powerpc/sigcontext.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_PPC64_SIGCONTEXT_H 1#ifndef _ASM_POWERPC_SIGCONTEXT_H
2#define _ASM_PPC64_SIGCONTEXT_H 2#define _ASM_POWERPC_SIGCONTEXT_H
3 3
4/* 4/*
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -9,39 +9,44 @@
9 */ 9 */
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <asm/ptrace.h> 11#include <asm/ptrace.h>
12#ifdef __powerpc64__
12#include <asm/elf.h> 13#include <asm/elf.h>
13 14#endif
14 15
15struct sigcontext { 16struct sigcontext {
16 unsigned long _unused[4]; 17 unsigned long _unused[4];
17 int signal; 18 int signal;
19#ifdef __powerpc64__
18 int _pad0; 20 int _pad0;
21#endif
19 unsigned long handler; 22 unsigned long handler;
20 unsigned long oldmask; 23 unsigned long oldmask;
21 struct pt_regs __user *regs; 24 struct pt_regs __user *regs;
25#ifdef __powerpc64__
22 elf_gregset_t gp_regs; 26 elf_gregset_t gp_regs;
23 elf_fpregset_t fp_regs; 27 elf_fpregset_t fp_regs;
24/* 28/*
25 * To maintain compatibility with current implementations the sigcontext is 29 * To maintain compatibility with current implementations the sigcontext is
26 * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t) 30 * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
27 * followed by an unstructured (vmx_reserve) field of 69 doublewords. This 31 * followed by an unstructured (vmx_reserve) field of 69 doublewords. This
28 * allows the array of vector registers to be quadword aligned independent of 32 * allows the array of vector registers to be quadword aligned independent of
29 * the alignment of the containing sigcontext or ucontext. It is the 33 * the alignment of the containing sigcontext or ucontext. It is the
30 * responsibility of the code setting the sigcontext to set this pointer to 34 * responsibility of the code setting the sigcontext to set this pointer to
31 * either NULL (if this processor does not support the VMX feature) or the 35 * either NULL (if this processor does not support the VMX feature) or the
32 * address of the first quadword within the allocated (vmx_reserve) area. 36 * address of the first quadword within the allocated (vmx_reserve) area.
33 * 37 *
34 * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with 38 * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
35 * an array of 34 quadword entries (elf_vrregset_t). The entries with 39 * an array of 34 quadword entries (elf_vrregset_t). The entries with
36 * indexes 0-31 contain the corresponding vector registers. The entry with 40 * indexes 0-31 contain the corresponding vector registers. The entry with
37 * index 32 contains the vscr as the last word (offset 12) within the 41 * index 32 contains the vscr as the last word (offset 12) within the
38 * quadword. This allows the vscr to be stored as either a quadword (since 42 * quadword. This allows the vscr to be stored as either a quadword (since
39 * it must be copied via a vector register to/from storage) or as a word. 43 * it must be copied via a vector register to/from storage) or as a word.
40 * The entry with index 33 contains the vrsave as the first word (offset 0) 44 * The entry with index 33 contains the vrsave as the first word (offset 0)
41 * within the quadword. 45 * within the quadword.
42 */ 46 */
43 elf_vrreg_t __user *v_regs; 47 elf_vrreg_t __user *v_regs;
44 long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1]; 48 long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1];
49#endif
45}; 50};
46 51
47#endif /* _ASM_PPC64_SIGCONTEXT_H */ 52#endif /* _ASM_POWERPC_SIGCONTEXT_H */
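The alignment rule in the long comment above reduces to rounding the start of vmx_reserve up to a 16-byte boundary. A minimal 64-bit sketch (the function name is illustrative):

    static elf_vrreg_t __user *quadword_align(struct sigcontext __user *sc)
    {
            unsigned long addr = (unsigned long)&sc->vmx_reserve[0];

            /* first quadword-aligned address inside vmx_reserve */
            return (elf_vrreg_t __user *)((addr + 15UL) & ~15UL);
    }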
diff --git a/include/asm-ppc64/smp.h b/include/asm-powerpc/smp.h
index c5e9052e7967..8bcdd0faefea 100644
--- a/include/asm-ppc64/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * smp.h: PPC64 specific SMP code. 2 * smp.h: PowerPC-specific SMP code.
3 * 3 *
4 * Original was a copy of sparc smp.h. Now heavily modified 4 * Original was a copy of sparc smp.h. Now heavily modified
5 * for PPC. 5 * for PPC.
@@ -13,9 +13,9 @@
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 */ 14 */
15 15
16#ifndef _ASM_POWERPC_SMP_H
17#define _ASM_POWERPC_SMP_H
16#ifdef __KERNEL__ 18#ifdef __KERNEL__
17#ifndef _PPC64_SMP_H
18#define _PPC64_SMP_H
19 19
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/threads.h> 21#include <linux/threads.h>
@@ -24,7 +24,9 @@
24 24
25#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
26 26
27#ifdef CONFIG_PPC64
27#include <asm/paca.h> 28#include <asm/paca.h>
29#endif
28 30
29extern int boot_cpuid; 31extern int boot_cpuid;
30extern int boot_cpuid_phys; 32extern int boot_cpuid_phys;
@@ -45,8 +47,19 @@ void generic_cpu_die(unsigned int cpu);
45void generic_mach_cpu_die(void); 47void generic_mach_cpu_die(void);
46#endif 48#endif
47 49
50#ifdef CONFIG_PPC64
48#define raw_smp_processor_id() (get_paca()->paca_index) 51#define raw_smp_processor_id() (get_paca()->paca_index)
49#define hard_smp_processor_id() (get_paca()->hw_cpu_id) 52#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
53#else
54/* 32-bit */
55extern int smp_hw_index[];
56
57#define raw_smp_processor_id() (current_thread_info()->cpu)
58#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
59#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
60#define set_hard_smp_processor_id(cpu, phys)\
61 (smp_hw_index[(cpu)] = (phys))
62#endif
50 63
51extern cpumask_t cpu_sibling_map[NR_CPUS]; 64extern cpumask_t cpu_sibling_map[NR_CPUS];
52 65
@@ -64,14 +77,30 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
64 77
65void smp_init_iSeries(void); 78void smp_init_iSeries(void);
66void smp_init_pSeries(void); 79void smp_init_pSeries(void);
80void smp_init_cell(void);
81void smp_setup_cpu_maps(void);
67 82
68extern int __cpu_disable(void); 83extern int __cpu_disable(void);
69extern void __cpu_die(unsigned int cpu); 84extern void __cpu_die(unsigned int cpu);
85
86#else
87/* for UP */
88#define smp_setup_cpu_maps()
89#define smp_release_cpus()
90
70#endif /* CONFIG_SMP */ 91#endif /* CONFIG_SMP */
71 92
93#ifdef CONFIG_PPC64
72#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id) 94#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
73#define set_hard_smp_processor_id(CPU, VAL) \ 95#define set_hard_smp_processor_id(CPU, VAL) \
74 do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0) 96 do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
97#else
98/* 32-bit */
99#ifndef CONFIG_SMP
100#define get_hard_smp_processor_id(cpu) boot_cpuid_phys
101#define set_hard_smp_processor_id(cpu, phys)
102#endif
103#endif
75 104
76extern int smt_enabled_at_boot; 105extern int smt_enabled_at_boot;
77 106
@@ -84,15 +113,7 @@ extern void smp_generic_take_timebase(void);
84 113
85extern struct smp_ops_t *smp_ops; 114extern struct smp_ops_t *smp_ops;
86 115
87#ifdef CONFIG_PPC_PSERIES
88void vpa_init(int cpu);
89#else
90static inline void vpa_init(int cpu)
91{
92}
93#endif /* CONFIG_PPC_PSERIES */
94
95#endif /* __ASSEMBLY__ */ 116#endif /* __ASSEMBLY__ */
96 117
97#endif /* !(_PPC64_SMP_H) */
98#endif /* __KERNEL__ */ 118#endif /* __KERNEL__ */
119#endif /* _ASM_POWERPC_SMP_H */
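Both halves of the #ifdef maze above export the same accessors, so callers can stay arch-neutral. A hedged sketch (dump_cpu_map is illustrative):

    #include <linux/cpumask.h>
    #include <linux/kernel.h>
    #include <linux/smp.h>

    /* Logical cpu numbers map to physical (hard) ids via the paca on
     * 64-bit and smp_hw_index on 32-bit; the accessor hides both. */
    static void dump_cpu_map(void)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    printk(KERN_DEBUG "cpu %d -> hw id %d\n",
                           cpu, get_hard_smp_processor_id(cpu));
    }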
diff --git a/include/asm-ppc64/sparsemem.h b/include/asm-powerpc/sparsemem.h
index c5bd47e57f17..1c95ab99deb3 100644
--- a/include/asm-ppc64/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_PPC64_SPARSEMEM_H 1#ifndef _ASM_POWERPC_SPARSEMEM_H
2#define _ASM_PPC64_SPARSEMEM_H 1 2#define _ASM_POWERPC_SPARSEMEM_H 1
3 3
4#ifdef CONFIG_SPARSEMEM 4#ifdef CONFIG_SPARSEMEM
5/* 5/*
@@ -13,4 +13,4 @@
13 13
14#endif /* CONFIG_SPARSEMEM */ 14#endif /* CONFIG_SPARSEMEM */
15 15
16#endif /* _ASM_PPC64_SPARSEMEM_H */ 16#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/stat.h b/include/asm-powerpc/stat.h
new file mode 100644
index 000000000000..e4edc510b530
--- /dev/null
+++ b/include/asm-powerpc/stat.h
@@ -0,0 +1,81 @@
1#ifndef _ASM_POWERPC_STAT_H
2#define _ASM_POWERPC_STAT_H
3/*
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/types.h>
10
11#define STAT_HAVE_NSEC 1
12
13#ifndef __powerpc64__
14struct __old_kernel_stat {
15 unsigned short st_dev;
16 unsigned short st_ino;
17 unsigned short st_mode;
18 unsigned short st_nlink;
19 unsigned short st_uid;
20 unsigned short st_gid;
21 unsigned short st_rdev;
22 unsigned long st_size;
23 unsigned long st_atime;
24 unsigned long st_mtime;
25 unsigned long st_ctime;
26};
27#endif /* !__powerpc64__ */
28
29struct stat {
30 unsigned long st_dev;
31 ino_t st_ino;
32#ifdef __powerpc64__
33 nlink_t st_nlink;
34 mode_t st_mode;
35#else
36 mode_t st_mode;
37 nlink_t st_nlink;
38#endif
39 uid_t st_uid;
40 gid_t st_gid;
41 unsigned long st_rdev;
42 off_t st_size;
43 unsigned long st_blksize;
44 unsigned long st_blocks;
45 unsigned long st_atime;
46 unsigned long st_atime_nsec;
47 unsigned long st_mtime;
48 unsigned long st_mtime_nsec;
49 unsigned long st_ctime;
50 unsigned long st_ctime_nsec;
51 unsigned long __unused4;
52 unsigned long __unused5;
53#ifdef __powerpc64__
54 unsigned long __unused6;
55#endif
56};
57
58/* This matches struct stat64 in glibc 2.1. Only used for 32-bit. */
59struct stat64 {
60 unsigned long long st_dev; /* Device. */
61 unsigned long long st_ino; /* File serial number. */
62 unsigned int st_mode; /* File mode. */
63 unsigned int st_nlink; /* Link count. */
64 unsigned int st_uid; /* User ID of the file's owner. */
65 unsigned int st_gid; /* Group ID of the file's group. */
66 unsigned long long st_rdev; /* Device number, if device. */
67 unsigned short __pad2;
68 long long st_size; /* Size of file, in bytes. */
69 int st_blksize; /* Optimal block size for I/O. */
70 long long st_blocks; /* Number of 512-byte blocks allocated. */
71 int st_atime; /* Time of last access. */
72 unsigned int st_atime_nsec;
73 int st_mtime; /* Time of last modification. */
74 unsigned int st_mtime_nsec;
75 int st_ctime; /* Time of last status change. */
76 unsigned int st_ctime_nsec;
77 unsigned int __unused4;
78 unsigned int __unused5;
79};
80
81#endif /* _ASM_POWERPC_STAT_H */
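Userspace normally reaches these layouts through libc. A small sketch of the portable subset (STAT_HAVE_NSEC tells generic kernel code to fill the *_nsec fields; the helper name is illustrative):

    #include <stdio.h>
    #include <sys/stat.h>

    int show_times(const char *path)
    {
            struct stat sb;

            if (stat(path, &sb) < 0)
                    return -1;
            printf("%s: atime=%ld mtime=%ld\n", path,
                   (long)sb.st_atime, (long)sb.st_mtime);
            return 0;
    }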
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 5b2ecbc47907..b5da0b851e02 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -359,5 +359,53 @@ extern void reloc_got2(unsigned long);
359 359
360#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) 360#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
361 361
362static inline void create_instruction(unsigned long addr, unsigned int instr)
363{
364 unsigned int *p;
365 p = (unsigned int *)addr;
366 *p = instr;
367 asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
368}
369
370/* Flags for create_branch:
371 * "b" == create_branch(addr, target, 0);
372 * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
373 * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
374 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
375 */
376#define BRANCH_SET_LINK 0x1
377#define BRANCH_ABSOLUTE 0x2
378
379static inline void create_branch(unsigned long addr,
380 unsigned long target, int flags)
381{
382 unsigned int instruction;
383
384 if (!(flags & BRANCH_ABSOLUTE))
385 target = target - addr;
386
387 /* Mask out the flags and target, so they don't step on each other. */
388 instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
389
390 create_instruction(addr, instruction);
391}
392
393static inline void create_function_call(unsigned long addr, void * func)
394{
395 unsigned long func_addr;
396
397#ifdef CONFIG_PPC64
398 /*
399 * On PPC64 the function pointer actually points to the function's
400 * descriptor. The first entry in the descriptor is the address
401 * of the function text.
402 */
403 func_addr = *(unsigned long *)func;
404#else
405 func_addr = (unsigned long)func;
406#endif
407 create_branch(addr, func_addr, BRANCH_SET_LINK);
408}
409
362#endif /* __KERNEL__ */ 410#endif /* __KERNEL__ */
363#endif /* _ASM_POWERPC_SYSTEM_H */ 411#endif /* _ASM_POWERPC_SYSTEM_H */
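A hedged sketch of the four encodings listed in the flags comment above; the call site and target are hypothetical, and create_branch() masks the target into the 26-bit displacement field of the I-form branch:

    static void patch_examples(unsigned long site, unsigned long target)
    {
            create_branch(site, target, 0);                   /* b   */
            create_branch(site, target, BRANCH_ABSOLUTE);     /* ba  */
            create_branch(site, target, BRANCH_SET_LINK);     /* bl  */
            create_branch(site, target,
                          BRANCH_ABSOLUTE | BRANCH_SET_LINK); /* bla */
    }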
diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h
index c5b8e5358f83..7f80a019b6a0 100644
--- a/include/asm-powerpc/termios.h
+++ b/include/asm-powerpc/termios.h
@@ -94,142 +94,9 @@ struct termio {
94#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025" 94#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
95#endif 95#endif
96 96
97#define FIOCLEX _IO('f', 1)
98#define FIONCLEX _IO('f', 2)
99#define FIOASYNC _IOW('f', 125, int)
100#define FIONBIO _IOW('f', 126, int)
101#define FIONREAD _IOR('f', 127, int)
102#define TIOCINQ FIONREAD
103
104#define TIOCGETP _IOR('t', 8, struct sgttyb)
105#define TIOCSETP _IOW('t', 9, struct sgttyb)
106#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
107
108#define TIOCSETC _IOW('t', 17, struct tchars)
109#define TIOCGETC _IOR('t', 18, struct tchars)
110#define TCGETS _IOR('t', 19, struct termios)
111#define TCSETS _IOW('t', 20, struct termios)
112#define TCSETSW _IOW('t', 21, struct termios)
113#define TCSETSF _IOW('t', 22, struct termios)
114
115#define TCGETA _IOR('t', 23, struct termio)
116#define TCSETA _IOW('t', 24, struct termio)
117#define TCSETAW _IOW('t', 25, struct termio)
118#define TCSETAF _IOW('t', 28, struct termio)
119
120#define TCSBRK _IO('t', 29)
121#define TCXONC _IO('t', 30)
122#define TCFLSH _IO('t', 31)
123
124#define TIOCSWINSZ _IOW('t', 103, struct winsize)
125#define TIOCGWINSZ _IOR('t', 104, struct winsize)
126#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
127#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
128#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
129
130#define TIOCGLTC _IOR('t', 116, struct ltchars)
131#define TIOCSLTC _IOW('t', 117, struct ltchars)
132#define TIOCSPGRP _IOW('t', 118, int)
133#define TIOCGPGRP _IOR('t', 119, int)
134
135#define TIOCEXCL 0x540C
136#define TIOCNXCL 0x540D
137#define TIOCSCTTY 0x540E
138
139#define TIOCSTI 0x5412
140#define TIOCMGET 0x5415
141#define TIOCMBIS 0x5416
142#define TIOCMBIC 0x5417
143#define TIOCMSET 0x5418
144#define TIOCGSOFTCAR 0x5419
145#define TIOCSSOFTCAR 0x541A
146#define TIOCLINUX 0x541C
147#define TIOCCONS 0x541D
148#define TIOCGSERIAL 0x541E
149#define TIOCSSERIAL 0x541F
150#define TIOCPKT 0x5420
151
152#define TIOCNOTTY 0x5422
153#define TIOCSETD 0x5423
154#define TIOCGETD 0x5424
155#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
156
157#define TIOCSERCONFIG 0x5453
158#define TIOCSERGWILD 0x5454
159#define TIOCSERSWILD 0x5455
160#define TIOCGLCKTRMIOS 0x5456
161#define TIOCSLCKTRMIOS 0x5457
162#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
163#define TIOCSERGETLSR 0x5459 /* Get line status register */
164#define TIOCSERGETMULTI 0x545A /* Get multiport config */
165#define TIOCSERSETMULTI 0x545B /* Set multiport config */
166
167#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
168#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
169
170/* Used for packet mode */
171#define TIOCPKT_DATA 0
172#define TIOCPKT_FLUSHREAD 1
173#define TIOCPKT_FLUSHWRITE 2
174#define TIOCPKT_STOP 4
175#define TIOCPKT_START 8
176#define TIOCPKT_NOSTOP 16
177#define TIOCPKT_DOSTOP 32
178
179/* modem lines */
180#define TIOCM_LE 0x001
181#define TIOCM_DTR 0x002
182#define TIOCM_RTS 0x004
183#define TIOCM_ST 0x008
184#define TIOCM_SR 0x010
185#define TIOCM_CTS 0x020
186#define TIOCM_CAR 0x040
187#define TIOCM_RNG 0x080
188#define TIOCM_DSR 0x100
189#define TIOCM_CD TIOCM_CAR
190#define TIOCM_RI TIOCM_RNG
191#define TIOCM_OUT1 0x2000
192#define TIOCM_OUT2 0x4000
193#define TIOCM_LOOP 0x8000
194
195/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
196#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
197
198#ifdef __KERNEL__ 97#ifdef __KERNEL__
199 98
200/* 99#include <asm-generic/termios.h>
201 * Translate a "termio" structure into a "termios". Ugh.
202 */
203#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
204 unsigned short __tmp; \
205 get_user(__tmp,&(termio)->x); \
206 (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
207}
208
209#define user_termio_to_kernel_termios(termios, termio) \
210({ \
211 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
212 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
213 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
214 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
215 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
216})
217
218/*
219 * Translate a "termios" structure into a "termio". Ugh.
220 */
221#define kernel_termios_to_user_termio(termio, termios) \
222({ \
223 put_user((termios)->c_iflag, &(termio)->c_iflag); \
224 put_user((termios)->c_oflag, &(termio)->c_oflag); \
225 put_user((termios)->c_cflag, &(termio)->c_cflag); \
226 put_user((termios)->c_lflag, &(termio)->c_lflag); \
227 put_user((termios)->c_line, &(termio)->c_line); \
228 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
229})
230
231#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
232#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
233 100
234#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
235 102
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index 410e795f7d43..d9b86a17271b 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -21,7 +21,7 @@
21#include <asm/processor.h> 21#include <asm/processor.h>
22#ifdef CONFIG_PPC64 22#ifdef CONFIG_PPC64
23#include <asm/paca.h> 23#include <asm/paca.h>
24#include <asm/iSeries/HvCall.h> 24#include <asm/iseries/hv_call.h>
25#endif 25#endif
26 26
27/* time.c */ 27/* time.c */
diff --git a/include/asm-ppc/tlb.h b/include/asm-powerpc/tlb.h
index 2c142c5d8584..56659f121779 100644
--- a/include/asm-ppc/tlb.h
+++ b/include/asm-powerpc/tlb.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * TLB shootdown specifics for PPC 2 * TLB shootdown specifics for powerpc
3 * 3 *
4 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
4 * Copyright (C) 2002 Paul Mackerras, IBM Corp. 5 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -8,29 +9,53 @@
8 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
10 */ 11 */
11#ifndef _PPC_TLB_H 12#ifndef _ASM_POWERPC_TLB_H
12#define _PPC_TLB_H 13#define _ASM_POWERPC_TLB_H
13 14
14#include <linux/config.h> 15#include <linux/config.h>
16#ifndef __powerpc64__
15#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#endif
16#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
17#include <asm/tlbflush.h> 20#include <asm/tlbflush.h>
21#ifndef __powerpc64__
18#include <asm/page.h> 22#include <asm/page.h>
19#include <asm/mmu.h> 23#include <asm/mmu.h>
20 24#endif
21#ifdef CONFIG_PPC_STD_MMU
22/* Classic PPC with hash-table based MMU... */
23 25
24struct mmu_gather; 26struct mmu_gather;
27
28#define tlb_start_vma(tlb, vma) do { } while (0)
29#define tlb_end_vma(tlb, vma) do { } while (0)
30
31#if !defined(CONFIG_PPC_STD_MMU)
32
33#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
34
35#elif defined(__powerpc64__)
36
37extern void pte_free_finish(void);
38
39static inline void tlb_flush(struct mmu_gather *tlb)
40{
41 flush_tlb_pending();
42 pte_free_finish();
43}
44
45#else
46
25extern void tlb_flush(struct mmu_gather *tlb); 47extern void tlb_flush(struct mmu_gather *tlb);
26 48
49#endif
50
27/* Get the generic bits... */ 51/* Get the generic bits... */
28#include <asm-generic/tlb.h> 52#include <asm-generic/tlb.h>
29 53
30/* Nothing needed here in fact... */ 54#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
31#define tlb_start_vma(tlb, vma) do { } while (0) 55
32#define tlb_end_vma(tlb, vma) do { } while (0) 56#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
33 57
58#else
34extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, 59extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
35 unsigned long address); 60 unsigned long address);
36 61
@@ -41,17 +66,5 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
41 flush_hash_entry(tlb->mm, ptep, address); 66 flush_hash_entry(tlb->mm, ptep, address);
42} 67}
43 68
44#else 69#endif
45/* Embedded PPC with software-loaded TLB, very simple... */ 70#endif /* _ASM_POWERPC_TLB_H */
46
47#define tlb_start_vma(tlb, vma) do { } while (0)
48#define tlb_end_vma(tlb, vma) do { } while (0)
49#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
50#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
51
52/* Get the generic bits... */
53#include <asm-generic/tlb.h>
54
55#endif /* CONFIG_PPC_STD_MMU */
56
57#endif /* __PPC_TLB_H */
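The tlb_flush() variants above are driven by the generic mmu_gather machinery. A hedged sketch of that flow, simplified from the 2.6-era unmap path (the per-PTE loop is elided and the helper name is illustrative):

    #include <asm/tlb.h>

    static void unmap_and_flush(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
    {
            struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

            /* ... __tlb_remove_tlb_entry()/tlb_remove_page() per PTE ... */

            tlb_finish_mmu(tlb, start, end);  /* ends in tlb_flush() */
    }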
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
new file mode 100644
index 000000000000..ca3655672bbc
--- /dev/null
+++ b/include/asm-powerpc/tlbflush.h
@@ -0,0 +1,146 @@
1#ifndef _ASM_POWERPC_TLBFLUSH_H
2#define _ASM_POWERPC_TLBFLUSH_H
3/*
4 * TLB flushing:
5 *
6 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
7 * - flush_tlb_page(vma, vmaddr) flushes one page
8 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page on SW-loaded TLBs
9 * - flush_tlb_range(vma, start, end) flushes a range of pages
10 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
11 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifdef __KERNEL__
19
20#include <linux/config.h>
21
22struct mm_struct;
23
24#ifdef CONFIG_PPC64
25
26#include <linux/percpu.h>
27#include <asm/page.h>
28
29#define PPC64_TLB_BATCH_NR 192
30
31struct ppc64_tlb_batch {
32 unsigned long index;
33 struct mm_struct *mm;
34 pte_t pte[PPC64_TLB_BATCH_NR];
35 unsigned long vaddr[PPC64_TLB_BATCH_NR];
36 unsigned int large;
37};
38DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
39
40extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
41
42static inline void flush_tlb_pending(void)
43{
44 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
45
46 if (batch->index)
47 __flush_tlb_pending(batch);
48 put_cpu_var(ppc64_tlb_batch);
49}
50
51extern void flush_hash_page(unsigned long va, pte_t pte, int local);
52void flush_hash_range(unsigned long number, int local);
53
54#else /* CONFIG_PPC64 */
55
56#include <linux/mm.h>
57
58extern void _tlbie(unsigned long address);
59extern void _tlbia(void);
60
61/*
62 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
63 * flush_tlb_kernel_range are best implemented as tlbia vs
64 * specific tlbie's
65 */
66
67#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
68#define flush_tlb_pending() asm volatile ("tlbia; sync" : : : "memory")
69#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
70#define flush_tlb_pending() _tlbia()
71#endif
72
73/*
74 * This gets called at the end of handling a page fault, when
75 * the kernel has put a new PTE into the page table for the process.
76 * We use it to ensure coherency between the i-cache and d-cache
77 * for the page which has just been mapped in.
78 * On machines which use an MMU hash table, we use this to put a
79 * corresponding HPTE into the hash table ahead of time, instead of
80 * waiting for the inevitable extra hash-table miss exception.
81 */
82extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
83
84#endif /* CONFIG_PPC64 */
85
86#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
87 defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
88
89static inline void flush_tlb_mm(struct mm_struct *mm)
90{
91 flush_tlb_pending();
92}
93
94static inline void flush_tlb_page(struct vm_area_struct *vma,
95 unsigned long vmaddr)
96{
97#ifdef CONFIG_PPC64
98 flush_tlb_pending();
99#else
100 _tlbie(vmaddr);
101#endif
102}
103
104static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
105 unsigned long vmaddr)
106{
107#ifndef CONFIG_PPC64
108 _tlbie(vmaddr);
109#endif
110}
111
112static inline void flush_tlb_range(struct vm_area_struct *vma,
113 unsigned long start, unsigned long end)
114{
115 flush_tlb_pending();
116}
117
118static inline void flush_tlb_kernel_range(unsigned long start,
119 unsigned long end)
120{
121 flush_tlb_pending();
122}
123
124#else /* 6xx, 7xx, 7xxx cpus */
125
126extern void flush_tlb_mm(struct mm_struct *mm);
127extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
128extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
129extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
130 unsigned long end);
131extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
132
133#endif
134
135/*
136 * This is called in munmap when we have freed up some page-table
137 * pages. We don't need to do anything here, there's nothing special
138 * about our page-table pages. -- paulus
139 */
140static inline void flush_tlb_pgtables(struct mm_struct *mm,
141 unsigned long start, unsigned long end)
142{
143}
144
145#endif /* __KERNEL__ */
146#endif /* _ASM_POWERPC_TLBFLUSH_H */
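On the 64-bit side everything above funnels into the per-cpu batch: invalidations are queued, and flush_tlb_pending() drains them. A hedged sketch of a caller picking the narrowest flush (the helper name is illustrative):

    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    /* Illustrative: flush a just-unmapped window; on ppc64 both
     * paths end up draining the per-cpu ppc64_tlb_batch. */
    static void invalidate_window(struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end)
    {
            if (end - start == PAGE_SIZE)
                    flush_tlb_page(vma, start);
            else
                    flush_tlb_range(vma, start, end);
    }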
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 000000000000..33af730f0d19
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
1#ifndef _ARCH_POWERPC_UACCESS_H
2#define _ARCH_POWERPC_UACCESS_H
3
4#ifdef __KERNEL__
5#ifndef __ASSEMBLY__
6
7#include <linux/sched.h>
8#include <linux/errno.h>
9#include <asm/processor.h>
10
11#define VERIFY_READ 0
12#define VERIFY_WRITE 1
13
14/*
15 * The fs value determines whether argument validity checking should be
16 * performed or not. If get_fs() == USER_DS, checking is performed, with
17 * get_fs() == KERNEL_DS, checking is bypassed.
18 *
19 * For historical reasons, these macros are grossly misnamed.
20 *
21 * The fs/ds values are now the highest legal address in the "segment".
22 * This simplifies the checking in the routines below.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27#define KERNEL_DS MAKE_MM_SEG(~0UL)
28#ifdef __powerpc64__
29/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
30#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
31#else
32#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
33#endif
34
35#define get_ds() (KERNEL_DS)
36#define get_fs() (current->thread.fs)
37#define set_fs(val) (current->thread.fs = (val))
38
39#define segment_eq(a, b) ((a).seg == (b).seg)
40
41#ifdef __powerpc64__
42/*
43 * This check is sufficient because there is a large enough
44 * gap between user addresses and the kernel addresses
45 */
46#define __access_ok(addr, size, segment) \
47 (((addr) <= (segment).seg) && ((size) <= (segment).seg))
48
49#else
50
51#define __access_ok(addr, size, segment) \
52 (((addr) <= (segment).seg) && \
53 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
54
55#endif
56
57#define access_ok(type, addr, size) \
58 (__chk_user_ptr(addr), \
59 __access_ok((__force unsigned long)(addr), (size), get_fs()))
60
61/*
62 * The exception table consists of pairs of addresses: the first is the
63 * address of an instruction that is allowed to fault, and the second is
64 * the address at which the program should continue. No registers are
65 * modified, so it is entirely up to the continuation code to figure out
66 * what to do.
67 *
68 * All the routines below use bits of fixup code that are out of line
69 * with the main instruction path. This means when everything is well,
70 * we don't even have to jump over them. Further, they do not intrude
71 * on our cache or tlb entries.
72 */
73
74struct exception_table_entry {
75 unsigned long insn;
76 unsigned long fixup;
77};
78
79/*
80 * These are the main single-value transfer routines. They automatically
81 * use the right size if we just have the right pointer type.
82 *
83 * This gets kind of ugly. We want to return _two_ values in "get_user()"
84 * and yet we don't want to do any pointers, because that is too much
85 * of a performance impact. Thus we have a few rather ugly macros here,
86 * and hide all the ugliness from the user.
87 *
88 * The "__xxx" versions of the user access functions are versions that
89 * do not verify the address space, that must have been done previously
90 * with a separate "access_ok()" call (this is used when we do multiple
91 * accesses to the same area of user memory).
92 *
93 * As we use the same address space for kernel and user data on the
94 * PowerPC, we can just do these as direct assignments. (Of course, the
95 * exception handling means that it's no longer "just"...)
96 *
97 * The "user64" versions of the user access functions are versions that
98 * allow access of 64-bit data. The "get_user" functions do not
99 * properly handle 64-bit data because the value gets down cast to a long.
100 * The "put_user" functions already handle 64-bit data properly but we add
101 * "user64" versions for completeness
102 */
103#define get_user(x, ptr) \
104 __get_user_check((x), (ptr), sizeof(*(ptr)))
105#define put_user(x, ptr) \
106 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
107
108#define __get_user(x, ptr) \
109 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
110#define __put_user(x, ptr) \
111 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
112#ifndef __powerpc64__
113#define __get_user64(x, ptr) \
114 __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
115#define __put_user64(x, ptr) __put_user(x, ptr)
116#endif
117
118#define __get_user_unaligned __get_user
119#define __put_user_unaligned __put_user
120
121extern long __put_user_bad(void);
122
123#ifdef __powerpc64__
124#define __EX_TABLE_ALIGN "3"
125#define __EX_TABLE_TYPE "llong"
126#else
127#define __EX_TABLE_ALIGN "2"
128#define __EX_TABLE_TYPE "long"
129#endif
130
131/*
132 * We don't tell gcc that we are accessing memory, but this is OK
133 * because we do not write to any memory gcc knows about, so there
134 * are no aliasing issues.
135 */
136#define __put_user_asm(x, addr, err, op) \
137 __asm__ __volatile__( \
138 "1: " op " %1,0(%2) # put_user\n" \
139 "2:\n" \
140 ".section .fixup,\"ax\"\n" \
141 "3: li %0,%3\n" \
142 " b 2b\n" \
143 ".previous\n" \
144 ".section __ex_table,\"a\"\n" \
145 " .align " __EX_TABLE_ALIGN "\n" \
146 " ."__EX_TABLE_TYPE" 1b,3b\n" \
147 ".previous" \
148 : "=r" (err) \
149 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
150
151#ifdef __powerpc64__
152#define __put_user_asm2(x, ptr, retval) \
153 __put_user_asm(x, ptr, retval, "std")
154#else /* __powerpc64__ */
155#define __put_user_asm2(x, addr, err) \
156 __asm__ __volatile__( \
157 "1: stw %1,0(%2)\n" \
158 "2: stw %1+1,4(%2)\n" \
159 "3:\n" \
160 ".section .fixup,\"ax\"\n" \
161 "4: li %0,%3\n" \
162 " b 3b\n" \
163 ".previous\n" \
164 ".section __ex_table,\"a\"\n" \
165 " .align " __EX_TABLE_ALIGN "\n" \
166 " ." __EX_TABLE_TYPE " 1b,4b\n" \
167 " ." __EX_TABLE_TYPE " 2b,4b\n" \
168 ".previous" \
169 : "=r" (err) \
170 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
171#endif /* __powerpc64__ */
172
173#define __put_user_size(x, ptr, size, retval) \
174do { \
175 retval = 0; \
176 switch (size) { \
177 case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
178 case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
179 case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
180 case 8: __put_user_asm2(x, ptr, retval); break; \
181 default: __put_user_bad(); \
182 } \
183} while (0)
184
185#define __put_user_nocheck(x, ptr, size) \
186({ \
187 long __pu_err; \
188 might_sleep(); \
189 __chk_user_ptr(ptr); \
190 __put_user_size((x), (ptr), (size), __pu_err); \
191 __pu_err; \
192})
193
194#define __put_user_check(x, ptr, size) \
195({ \
196 long __pu_err = -EFAULT; \
197 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
198 might_sleep(); \
199 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
200 __put_user_size((x), __pu_addr, (size), __pu_err); \
201 __pu_err; \
202})
203
204extern long __get_user_bad(void);
205
206#define __get_user_asm(x, addr, err, op) \
207 __asm__ __volatile__( \
208 "1: "op" %1,0(%2) # get_user\n" \
209 "2:\n" \
210 ".section .fixup,\"ax\"\n" \
211 "3: li %0,%3\n" \
212 " li %1,0\n" \
213 " b 2b\n" \
214 ".previous\n" \
215 ".section __ex_table,\"a\"\n" \
216 " .align "__EX_TABLE_ALIGN "\n" \
217 " ." __EX_TABLE_TYPE " 1b,3b\n" \
218 ".previous" \
219 : "=r" (err), "=r" (x) \
220 : "b" (addr), "i" (-EFAULT), "0" (err))
221
222#ifdef __powerpc64__
223#define __get_user_asm2(x, addr, err) \
224 __get_user_asm(x, addr, err, "ld")
225#else /* __powerpc64__ */
226#define __get_user_asm2(x, addr, err) \
227 __asm__ __volatile__( \
228 "1: lwz %1,0(%2)\n" \
229 "2: lwz %1+1,4(%2)\n" \
230 "3:\n" \
231 ".section .fixup,\"ax\"\n" \
232 "4: li %0,%3\n" \
233 " li %1,0\n" \
234 " li %1+1,0\n" \
235 " b 3b\n" \
236 ".previous\n" \
237 ".section __ex_table,\"a\"\n" \
238 " .align " __EX_TABLE_ALIGN "\n" \
239 " ." __EX_TABLE_TYPE " 1b,4b\n" \
240 " ." __EX_TABLE_TYPE " 2b,4b\n" \
241 ".previous" \
242 : "=r" (err), "=&r" (x) \
243 : "b" (addr), "i" (-EFAULT), "0" (err))
244#endif /* __powerpc64__ */
245
246#define __get_user_size(x, ptr, size, retval) \
247do { \
248 retval = 0; \
249 __chk_user_ptr(ptr); \
250 if (size > sizeof(x)) \
251 (x) = __get_user_bad(); \
252 switch (size) { \
253 case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
254 case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
255 case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
256 case 8: __get_user_asm2(x, ptr, retval); break; \
257 default: (x) = __get_user_bad(); \
258 } \
259} while (0)
260
261#define __get_user_nocheck(x, ptr, size) \
262({ \
263 long __gu_err; \
264 unsigned long __gu_val; \
265 __chk_user_ptr(ptr); \
266 might_sleep(); \
267 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
268 (x) = (__typeof__(*(ptr)))__gu_val; \
269 __gu_err; \
270})
271
272#ifndef __powerpc64__
273#define __get_user64_nocheck(x, ptr, size) \
274({ \
275 long __gu_err; \
276 long long __gu_val; \
277 __chk_user_ptr(ptr); \
278 might_sleep(); \
279 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
280 (x) = (__typeof__(*(ptr)))__gu_val; \
281 __gu_err; \
282})
283#endif /* __powerpc64__ */
284
285#define __get_user_check(x, ptr, size) \
286({ \
287 long __gu_err = -EFAULT; \
288 unsigned long __gu_val = 0; \
289 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
290 might_sleep(); \
291 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
292 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
293 (x) = (__typeof__(*(ptr)))__gu_val; \
294 __gu_err; \
295})
296
297/* more complex routines */
298
299extern unsigned long __copy_tofrom_user(void __user *to,
300 const void __user *from, unsigned long size);
301
302#ifndef __powerpc64__
303
304extern inline unsigned long copy_from_user(void *to,
305 const void __user *from, unsigned long n)
306{
307 unsigned long over;
308
309 if (access_ok(VERIFY_READ, from, n))
310 return __copy_tofrom_user((__force void __user *)to, from, n);
311 if ((unsigned long)from < TASK_SIZE) {
312 over = (unsigned long)from + n - TASK_SIZE;
313 return __copy_tofrom_user((__force void __user *)to, from,
314 n - over) + over;
315 }
316 return n;
317}
318
319extern inline unsigned long copy_to_user(void __user *to,
320 const void *from, unsigned long n)
321{
322 unsigned long over;
323
324 if (access_ok(VERIFY_WRITE, to, n))
325 return __copy_tofrom_user(to, (__force void __user *)from, n);
326 if ((unsigned long)to < TASK_SIZE) {
327 over = (unsigned long)to + n - TASK_SIZE;
328 return __copy_tofrom_user(to, (__force void __user *)from,
329 n - over) + over;
330 }
331 return n;
332}
333
334#else /* __powerpc64__ */
335
336#define __copy_in_user(to, from, size) \
337 __copy_tofrom_user((to), (from), (size))
338
339extern unsigned long copy_from_user(void *to, const void __user *from,
340 unsigned long n);
341extern unsigned long copy_to_user(void __user *to, const void *from,
342 unsigned long n);
343extern unsigned long copy_in_user(void __user *to, const void __user *from,
344 unsigned long n);
345
346#endif /* __powerpc64__ */
347
348static inline unsigned long __copy_from_user_inatomic(void *to,
349 const void __user *from, unsigned long n)
350{
351 if (__builtin_constant_p(n) && (n <= 8)) {
352 unsigned long ret;
353
354 switch (n) {
355 case 1:
356 __get_user_size(*(u8 *)to, from, 1, ret);
357 break;
358 case 2:
359 __get_user_size(*(u16 *)to, from, 2, ret);
360 break;
361 case 4:
362 __get_user_size(*(u32 *)to, from, 4, ret);
363 break;
364 case 8:
365 __get_user_size(*(u64 *)to, from, 8, ret);
366 break;
367 }
368 if (ret == 0)
369 return 0;
370 }
371 return __copy_tofrom_user((__force void __user *)to, from, n);
372}
373
374static inline unsigned long __copy_to_user_inatomic(void __user *to,
375 const void *from, unsigned long n)
376{
377 if (__builtin_constant_p(n) && (n <= 8)) {
378 unsigned long ret;
379
380 switch (n) {
381 case 1:
382 __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
383 break;
384 case 2:
385 __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
386 break;
387 case 4:
388 __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
389 break;
390 case 8:
391 __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
392 break;
393 }
394 if (ret == 0)
395 return 0;
396 }
397 return __copy_tofrom_user(to, (__force const void __user *)from, n);
398}
399
400static inline unsigned long __copy_from_user(void *to,
401 const void __user *from, unsigned long size)
402{
403 might_sleep();
404 return __copy_from_user_inatomic(to, from, size);
405}
406
407static inline unsigned long __copy_to_user(void __user *to,
408 const void *from, unsigned long size)
409{
410 might_sleep();
411 return __copy_to_user_inatomic(to, from, size);
412}
413
414extern unsigned long __clear_user(void __user *addr, unsigned long size);
415
416static inline unsigned long clear_user(void __user *addr, unsigned long size)
417{
418 might_sleep();
419 if (likely(access_ok(VERIFY_WRITE, addr, size)))
420 return __clear_user(addr, size);
421 if ((unsigned long)addr < TASK_SIZE) {
422 unsigned long over = (unsigned long)addr + size - TASK_SIZE;
423 return __clear_user(addr, size - over) + over;
424 }
425 return size;
426}
427
428extern int __strncpy_from_user(char *dst, const char __user *src, long count);
429
430static inline long strncpy_from_user(char *dst, const char __user *src,
431 long count)
432{
433 might_sleep();
434 if (likely(access_ok(VERIFY_READ, src, 1)))
435 return __strncpy_from_user(dst, src, count);
436 return -EFAULT;
437}
438
439/*
440 * Return the size of a string (including the ending 0)
441 *
442 * Return 0 for error
443 */
444extern int __strnlen_user(const char __user *str, long len, unsigned long top);
445
446/*
447 * Returns the length of the string at str (including the null byte),
448 * or 0 if we hit a page we can't access,
449 * or something > len if we didn't find a null byte.
450 *
451 * The `top' parameter to __strnlen_user is to make sure that
452 * we can never overflow from the user area into kernel space.
453 */
454static inline int strnlen_user(const char __user *str, long len)
455{
456 unsigned long top = current->thread.fs.seg;
457
458 if ((unsigned long)str > top)
459 return 0;
460 return __strnlen_user(str, len, top);
461}
462
463#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
464
465#endif /* __ASSEMBLY__ */
466#endif /* __KERNEL__ */
467
468#endif /* _ARCH_POWERPC_UACCESS_H */
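A hedged sketch of the checked/unchecked split described in the comments above: a single access_ok() can cover several __get_user() calls. read_pair is illustrative:

    #include <linux/errno.h>
    #include <asm/uaccess.h>

    static int read_pair(const int __user *uptr, int *a, int *b)
    {
            if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
                    return -EFAULT;
            /* unchecked variants are safe after the single check */
            if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
                    return -EFAULT;
            return 0;
    }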
diff --git a/include/asm-powerpc/ucontext.h b/include/asm-powerpc/ucontext.h
new file mode 100644
index 000000000000..d9a4ddf0cc86
--- /dev/null
+++ b/include/asm-powerpc/ucontext.h
@@ -0,0 +1,40 @@
1#ifndef _ASM_POWERPC_UCONTEXT_H
2#define _ASM_POWERPC_UCONTEXT_H
3
4#ifdef __powerpc64__
5#include <asm/sigcontext.h>
6#else
7#include <asm/elf.h>
8#endif
9#include <asm/signal.h>
10
11#ifndef __powerpc64__
12struct mcontext {
13 elf_gregset_t mc_gregs;
14 elf_fpregset_t mc_fregs;
15 unsigned long mc_pad[2];
16 elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
17};
18#endif
19
20struct ucontext {
21 unsigned long uc_flags;
22 struct ucontext __user *uc_link;
23 stack_t uc_stack;
24#ifndef __powerpc64__
25 int uc_pad[7];
26 struct mcontext __user *uc_regs;/* points to uc_mcontext field */
27#endif
28 sigset_t uc_sigmask;
29 /* glibc has 1024-bit signal masks, ours are 64-bit */
30#ifdef __powerpc64__
31 sigset_t __unused[15]; /* Allow for uc_sigmask growth */
32 struct sigcontext uc_mcontext; /* last for extensibility */
33#else
34 int uc_maskext[30];
35 int uc_pad2[3];
36 struct mcontext uc_mcontext;
37#endif
38};
39
40#endif /* _ASM_POWERPC_UCONTEXT_H */
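Userspace meets this structure as the third argument of an SA_SIGINFO handler. A minimal sketch (handler and install names are illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <ucontext.h>

    static void on_segv(int sig, siginfo_t *si, void *ctx)
    {
            ucontext_t *uc = ctx;

            printf("sig %d at %p, uc_link=%p\n",
                   sig, si->si_addr, (void *)uc->uc_link);
    }

    static void install(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = on_segv;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGSEGV, &sa, NULL);
    }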
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
deleted file mode 100644
index e30f536fd830..000000000000
--- a/include/asm-ppc/bitops.h
+++ /dev/null
@@ -1,460 +0,0 @@
1/*
2 * bitops.h: Bit string operations on the ppc
3 */
4
5#ifdef __KERNEL__
6#ifndef _PPC_BITOPS_H
7#define _PPC_BITOPS_H
8
9#include <linux/config.h>
10#include <linux/compiler.h>
11#include <asm/byteorder.h>
12#include <asm/atomic.h>
13
14/*
15 * The test_and_*_bit operations are taken to imply a memory barrier
16 * on SMP systems.
17 */
18#ifdef CONFIG_SMP
19#define SMP_WMB "eieio\n"
20#define SMP_MB "\nsync"
21#else
22#define SMP_WMB
23#define SMP_MB
24#endif /* CONFIG_SMP */
25
26static __inline__ void set_bit(int nr, volatile unsigned long * addr)
27{
28 unsigned long old;
29 unsigned long mask = 1 << (nr & 0x1f);
30 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
31
32 __asm__ __volatile__("\n\
331: lwarx %0,0,%3 \n\
34 or %0,%0,%2 \n"
35 PPC405_ERR77(0,%3)
36" stwcx. %0,0,%3 \n\
37 bne- 1b"
38 : "=&r" (old), "=m" (*p)
39 : "r" (mask), "r" (p), "m" (*p)
40 : "cc" );
41}
42
43/*
44 * non-atomic version
45 */
46static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
47{
48 unsigned long mask = 1 << (nr & 0x1f);
49 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
50
51 *p |= mask;
52}
53
54/*
55 * clear_bit doesn't imply a memory barrier
56 */
57#define smp_mb__before_clear_bit() smp_mb()
58#define smp_mb__after_clear_bit() smp_mb()
59
60static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
61{
62 unsigned long old;
63 unsigned long mask = 1 << (nr & 0x1f);
64 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
65
66 __asm__ __volatile__("\n\
671: lwarx %0,0,%3 \n\
68 andc %0,%0,%2 \n"
69 PPC405_ERR77(0,%3)
70" stwcx. %0,0,%3 \n\
71 bne- 1b"
72 : "=&r" (old), "=m" (*p)
73 : "r" (mask), "r" (p), "m" (*p)
74 : "cc");
75}
76
77/*
78 * non-atomic version
79 */
80static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
81{
82 unsigned long mask = 1 << (nr & 0x1f);
83 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
84
85 *p &= ~mask;
86}
87
88static __inline__ void change_bit(int nr, volatile unsigned long *addr)
89{
90 unsigned long old;
91 unsigned long mask = 1 << (nr & 0x1f);
92 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
93
94 __asm__ __volatile__("\n\
951: lwarx %0,0,%3 \n\
96 xor %0,%0,%2 \n"
97 PPC405_ERR77(0,%3)
98" stwcx. %0,0,%3 \n\
99 bne- 1b"
100 : "=&r" (old), "=m" (*p)
101 : "r" (mask), "r" (p), "m" (*p)
102 : "cc");
103}
104
105/*
106 * non-atomic version
107 */
108static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
109{
110 unsigned long mask = 1 << (nr & 0x1f);
111 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
112
113 *p ^= mask;
114}
115
116/*
117 * The test_and_*_bit operations imply a memory barrier on SMP.
118 */
119static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
120{
121 unsigned int old, t;
122 unsigned int mask = 1 << (nr & 0x1f);
123 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
124
125 __asm__ __volatile__(SMP_WMB "\n\
1261: lwarx %0,0,%4 \n\
127 or %1,%0,%3 \n"
128 PPC405_ERR77(0,%4)
129" stwcx. %1,0,%4 \n\
130 bne 1b"
131 SMP_MB
132 : "=&r" (old), "=&r" (t), "=m" (*p)
133 : "r" (mask), "r" (p), "m" (*p)
134 : "cc", "memory");
135
136 return (old & mask) != 0;
137}
138
139/*
140 * non-atomic version
141 */
142static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
143{
144 unsigned long mask = 1 << (nr & 0x1f);
145 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
146 unsigned long old = *p;
147
148 *p = old | mask;
149 return (old & mask) != 0;
150}
151
152static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
153{
154 unsigned int old, t;
155 unsigned int mask = 1 << (nr & 0x1f);
156 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
157
158 __asm__ __volatile__(SMP_WMB "\n\
1591: lwarx %0,0,%4 \n\
160 andc %1,%0,%3 \n"
161 PPC405_ERR77(0,%4)
162" stwcx. %1,0,%4 \n\
163 bne 1b"
164 SMP_MB
165 : "=&r" (old), "=&r" (t), "=m" (*p)
166 : "r" (mask), "r" (p), "m" (*p)
167 : "cc", "memory");
168
169 return (old & mask) != 0;
170}
171
172/*
173 * non-atomic version
174 */
175static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
176{
177 unsigned long mask = 1 << (nr & 0x1f);
178 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
179 unsigned long old = *p;
180
181 *p = old & ~mask;
182 return (old & mask) != 0;
183}
184
185static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
186{
187 unsigned int old, t;
188 unsigned int mask = 1 << (nr & 0x1f);
189 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
190
191 __asm__ __volatile__(SMP_WMB "\n\
1921: lwarx %0,0,%4 \n\
193 xor %1,%0,%3 \n"
194 PPC405_ERR77(0,%4)
195" stwcx. %1,0,%4 \n\
196 bne 1b"
197 SMP_MB
198 : "=&r" (old), "=&r" (t), "=m" (*p)
199 : "r" (mask), "r" (p), "m" (*p)
200 : "cc", "memory");
201
202 return (old & mask) != 0;
203}
204
205/*
206 * non-atomic version
207 */
208static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
209{
210 unsigned long mask = 1 << (nr & 0x1f);
211 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
212 unsigned long old = *p;
213
214 *p = old ^ mask;
215 return (old & mask) != 0;
216}
217
218static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
219{
220 return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
221}
222
223/* Return the bit position of the most significant 1 bit in a word */
224static __inline__ int __ilog2(unsigned long x)
225{
226 int lz;
227
228 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
229 return 31 - lz;
230}
231
232static __inline__ int ffz(unsigned long x)
233{
234 if ((x = ~x) == 0)
235 return 32;
236 return __ilog2(x & -x);
237}
238
239static inline int __ffs(unsigned long x)
240{
241 return __ilog2(x & -x);
242}
243
244/*
245 * ffs: find first bit set. This is defined the same way as
246 * the libc and compiler builtin ffs routines, therefore
247 * differs in spirit from the above ffz (man ffs).
248 */
249static __inline__ int ffs(int x)
250{
251 return __ilog2(x & -x) + 1;
252}
253
254/*
255 * fls: find last (most-significant) bit set.
256 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
257 */
258static __inline__ int fls(unsigned int x)
259{
260 int lz;
261
262 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
263 return 32 - lz;
264}
265
266/*
267 * hweightN: returns the hamming weight (i.e. the number
268 * of bits set) of a N-bit word
269 */
270
271#define hweight32(x) generic_hweight32(x)
272#define hweight16(x) generic_hweight16(x)
273#define hweight8(x) generic_hweight8(x)
274
275/*
276 * Find the first bit set in a 140-bit bitmap.
277 * The first 100 bits are unlikely to be set.
278 */
279static inline int sched_find_first_bit(const unsigned long *b)
280{
281 if (unlikely(b[0]))
282 return __ffs(b[0]);
283 if (unlikely(b[1]))
284 return __ffs(b[1]) + 32;
285 if (unlikely(b[2]))
286 return __ffs(b[2]) + 64;
287 if (b[3])
288 return __ffs(b[3]) + 96;
289 return __ffs(b[4]) + 128;
290}
291
292/**
293 * find_next_bit - find the next set bit in a memory region
294 * @addr: The address to base the search on
295 * @offset: The bitnumber to start searching at
296 * @size: The maximum size to search
297 */
298static __inline__ unsigned long find_next_bit(const unsigned long *addr,
299 unsigned long size, unsigned long offset)
300{
301 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
302 unsigned int result = offset & ~31UL;
303 unsigned int tmp;
304
305 if (offset >= size)
306 return size;
307 size -= result;
308 offset &= 31UL;
309 if (offset) {
310 tmp = *p++;
311 tmp &= ~0UL << offset;
312 if (size < 32)
313 goto found_first;
314 if (tmp)
315 goto found_middle;
316 size -= 32;
317 result += 32;
318 }
319 while (size >= 32) {
320 if ((tmp = *p++) != 0)
321 goto found_middle;
322 result += 32;
323 size -= 32;
324 }
325 if (!size)
326 return result;
327 tmp = *p;
328
329found_first:
330 tmp &= ~0UL >> (32 - size);
331 if (tmp == 0UL) /* Are any bits set? */
332 return result + size; /* Nope. */
333found_middle:
334 return result + __ffs(tmp);
335}
336
337/**
338 * find_first_bit - find the first set bit in a memory region
339 * @addr: The address to start the search at
340 * @size: The maximum size to search
341 *
342 * Returns the bit-number of the first set bit, not the number of the byte
343 * containing a bit.
344 */
345#define find_first_bit(addr, size) \
346 find_next_bit((addr), (size), 0)
347
348/*
349 * This implementation of find_{first,next}_zero_bit was stolen from
350 * Linus' asm-alpha/bitops.h.
351 */
352#define find_first_zero_bit(addr, size) \
353 find_next_zero_bit((addr), (size), 0)
354
355static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr,
356 unsigned long size, unsigned long offset)
357{
358 unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
359 unsigned int result = offset & ~31UL;
360 unsigned int tmp;
361
362 if (offset >= size)
363 return size;
364 size -= result;
365 offset &= 31UL;
366 if (offset) {
367 tmp = *p++;
368 tmp |= ~0UL >> (32-offset);
369 if (size < 32)
370 goto found_first;
371 if (tmp != ~0U)
372 goto found_middle;
373 size -= 32;
374 result += 32;
375 }
376 while (size >= 32) {
377 if ((tmp = *p++) != ~0U)
378 goto found_middle;
379 result += 32;
380 size -= 32;
381 }
382 if (!size)
383 return result;
384 tmp = *p;
385found_first:
386 tmp |= ~0UL << size;
387 if (tmp == ~0UL) /* Are any bits zero? */
388 return result + size; /* Nope. */
389found_middle:
390 return result + ffz(tmp);
391}
392
393
394#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
395#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
396#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
397#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
398
399static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
400{
401 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
402
403 return (ADDR[nr >> 3] >> (nr & 7)) & 1;
404}
405
406/*
407 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
408 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
409 */
410
411#define ext2_find_first_zero_bit(addr, size) \
412 ext2_find_next_zero_bit((addr), (size), 0)
413
414static __inline__ unsigned long ext2_find_next_zero_bit(const void *addr,
415 unsigned long size, unsigned long offset)
416{
417 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
418 unsigned int result = offset & ~31UL;
419 unsigned int tmp;
420
421 if (offset >= size)
422 return size;
423 size -= result;
424 offset &= 31UL;
425 if (offset) {
426 tmp = cpu_to_le32p(p++);
427 tmp |= ~0UL >> (32-offset);
428 if (size < 32)
429 goto found_first;
430 if (tmp != ~0U)
431 goto found_middle;
432 size -= 32;
433 result += 32;
434 }
435 while (size >= 32) {
436 if ((tmp = cpu_to_le32p(p++)) != ~0U)
437 goto found_middle;
438 result += 32;
439 size -= 32;
440 }
441 if (!size)
442 return result;
443 tmp = cpu_to_le32p(p);
444found_first:
445 tmp |= ~0U << size;
446 if (tmp == ~0UL) /* Are any bits zero? */
447 return result + size; /* Nope. */
448found_middle:
449 return result + ffz(tmp);
450}
451
452/* Bitmap functions for the minix filesystem. */
453#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
454#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
455#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
456#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
457#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
458
459#endif /* _PPC_BITOPS_H */
460#endif /* __KERNEL__ */
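The ^ 0x18 in the ext2_* macros of the deleted header above converts little-endian on-disk bit numbers into positions within a big-endian 32-bit word by flipping the byte index (0 and 3 swap, 1 and 2 swap). A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            int nr;

            for (nr = 0; nr < 32; nr += 8)
                    printf("disk bit %2d -> word bit %2d (byte %d -> %d)\n",
                           nr, nr ^ 0x18, nr >> 3, (nr ^ 0x18) >> 3);
            return 0;
    }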
diff --git a/include/asm-ppc/commproc.h b/include/asm-ppc/commproc.h
index 5bbb8e2c1c6d..973e60908234 100644
--- a/include/asm-ppc/commproc.h
+++ b/include/asm-ppc/commproc.h
@@ -83,6 +83,8 @@ extern uint m8xx_cpm_hostalloc(uint size);
83extern int m8xx_cpm_hostfree(uint start); 83extern int m8xx_cpm_hostfree(uint start);
84extern void m8xx_cpm_hostdump(void); 84extern void m8xx_cpm_hostdump(void);
85 85
86extern void cpm_load_patch(volatile immap_t *immr);
87
86/* Buffer descriptors used by many of the CPM protocols. 88/* Buffer descriptors used by many of the CPM protocols.
87*/ 89*/
88typedef struct cpm_buf_desc { 90typedef struct cpm_buf_desc {
diff --git a/include/asm-ppc/futex.h b/include/asm-ppc/futex.h
deleted file mode 100644
index 9feff4ce1424..000000000000
--- a/include/asm-ppc/futex.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
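
The bitfield unpacking at the top of futex_atomic_op_inuser() mirrors the FUTEX_OP() encoder in <linux/futex.h>. A standalone decode check (the FUTEX_OP definition is restated here, from the kernel header, so the sketch compiles on its own):

#include <stdio.h>

#define FUTEX_OP(op, oparg, cmp, cmparg) \
	((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24) | \
	 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

int main(void)
{
	int encoded_op = FUTEX_OP(1, 10, 3, 4);	/* e.g. op ADD 10, compare CMP_LT 4 */
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;	/* arithmetic shift sign-extends */
	int cmparg = (encoded_op << 20) >> 20;	/* the 12-bit fields */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;	/* prints: op=1 cmp=3 oparg=10 cmparg=4 */
}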
diff --git a/include/asm-ppc/ipcbuf.h b/include/asm-ppc/ipcbuf.h
deleted file mode 100644
index fab6752c7480..000000000000
--- a/include/asm-ppc/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __PPC_IPCBUF_H__
2#define __PPC_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for PPC architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 1 32-bit value to fill up for 8-byte alignment
11 * - 2 miscellaneous 64-bit values (so that this structure matches
12 * PPC64 ipc64_perm)
13 */
14
15struct ipc64_perm
16{
17 __kernel_key_t key;
18 __kernel_uid_t uid;
19 __kernel_gid_t gid;
20 __kernel_uid_t cuid;
21 __kernel_gid_t cgid;
22 __kernel_mode_t mode;
23 unsigned long seq;
24 unsigned int __pad2;
25 unsigned long long __unused1;
26 unsigned long long __unused2;
27};
28
29#endif /* __PPC_IPCBUF_H__ */
diff --git a/include/asm-ppc/kexec.h b/include/asm-ppc/kexec.h
deleted file mode 100644
index 6d2aa0aa4642..000000000000
--- a/include/asm-ppc/kexec.h
+++ /dev/null
@@ -1,40 +0,0 @@
1#ifndef _PPC_KEXEC_H
2#define _PPC_KEXEC_H
3
4#ifdef CONFIG_KEXEC
5
6/*
7 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can
8 * return, i.e. the highest page that is mapped directly into kernel
9 * memory, so that kmap is not required.
10 *
11 * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct
12 * calculation for the amount of memory directly mappable into the
13 * kernel memory space.
14 */
15
16/* Maximum physical address we can use pages from */
17#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
18/* Maximum address we can reach in physical address mode */
19#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
20/* Maximum address we can use for the control code buffer */
21#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
22
23#define KEXEC_CONTROL_CODE_SIZE 4096
24
25/* The native architecture */
26#define KEXEC_ARCH KEXEC_ARCH_PPC
27
28#ifndef __ASSEMBLY__
29
30extern void *crash_notes;
31
32struct kimage;
33
34extern void machine_kexec_simple(struct kimage *image);
35
36#endif /* __ASSEMBLY__ */
37
38#endif /* CONFIG_KEXEC */
39
40#endif /* _PPC_KEXEC_H */
diff --git a/include/asm-ppc/ptrace.h b/include/asm-ppc/ptrace.h
deleted file mode 100644
index c34fb4e37a97..000000000000
--- a/include/asm-ppc/ptrace.h
+++ /dev/null
@@ -1,152 +0,0 @@
1#ifndef _PPC_PTRACE_H
2#define _PPC_PTRACE_H
3
4/*
5 * This struct defines the way the registers are stored on the
6 * kernel stack during a system call or other kernel entry.
7 *
8 * This should only contain volatile regs, since we can keep
9 * non-volatile regs in the thread_struct. Ideally this is set up
10 * so that only the volatiles are saved
11 * by the interrupt code.
12 *
13 * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
14 * that the overall structure is a multiple of 16 bytes in length.
15 *
16 * Note that the offsets of the fields in this struct correspond with
17 * the PT_* values below. This simplifies arch/ppc/kernel/ptrace.c.
18 */
19
20#ifndef __ASSEMBLY__
21struct pt_regs {
22 unsigned long gpr[32];
23 unsigned long nip;
24 unsigned long msr;
25 unsigned long orig_gpr3; /* Used for restarting system calls */
26 unsigned long ctr;
27 unsigned long link;
28 unsigned long xer;
29 unsigned long ccr;
30 unsigned long mq; /* 601 only (not used at present) */
31 /* Used on APUS to hold IPL value. */
32 unsigned long trap; /* Reason for being here */
33 /* N.B. for critical exceptions on 4xx, the dar and dsisr
34 fields are overloaded to hold srr0 and srr1. */
35 unsigned long dar; /* Fault registers */
36 unsigned long dsisr; /* on 4xx/Book-E used for ESR */
37 unsigned long result; /* Result of a system call */
38};
39
40#endif /* __ASSEMBLY__ */
41
42#ifdef __KERNEL__
43#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
44
45/* Size of stack frame allocated when calling signal handler. */
46#define __SIGNAL_FRAMESIZE 64
47
48#ifndef __ASSEMBLY__
49#define instruction_pointer(regs) ((regs)->nip)
50#ifdef CONFIG_SMP
51extern unsigned long profile_pc(struct pt_regs *regs);
52#else
53#define profile_pc(regs) instruction_pointer(regs)
54#endif
55
56#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
57
58#define force_successful_syscall_return() \
59 do { \
60 current_thread_info()->syscall_noerror = 1; \
61 } while(0)
62
63/*
64 * We use the least-significant bit of the trap field to indicate
65 * whether we have saved the full set of registers, or only a
66 * partial set. A 1 there means the partial set.
67 * On 4xx we use the next bit to indicate whether the exception
68 * is a critical exception (1 means it is).
69 */
70#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
71#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
72#define TRAP(regs) ((regs)->trap & ~0xF)
73
74#define CHECK_FULL_REGS(regs) \
75do { \
76 if ((regs)->trap & 1) \
77 printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
78} while (0)
79#endif /* __ASSEMBLY__ */
80
81#endif /* __KERNEL__ */
82
83/*
84 * Offsets used by 'ptrace' system call interface.
85 * These can't be changed without breaking binary compatibility
86 * with MkLinux, etc.
87 */
88#define PT_R0 0
89#define PT_R1 1
90#define PT_R2 2
91#define PT_R3 3
92#define PT_R4 4
93#define PT_R5 5
94#define PT_R6 6
95#define PT_R7 7
96#define PT_R8 8
97#define PT_R9 9
98#define PT_R10 10
99#define PT_R11 11
100#define PT_R12 12
101#define PT_R13 13
102#define PT_R14 14
103#define PT_R15 15
104#define PT_R16 16
105#define PT_R17 17
106#define PT_R18 18
107#define PT_R19 19
108#define PT_R20 20
109#define PT_R21 21
110#define PT_R22 22
111#define PT_R23 23
112#define PT_R24 24
113#define PT_R25 25
114#define PT_R26 26
115#define PT_R27 27
116#define PT_R28 28
117#define PT_R29 29
118#define PT_R30 30
119#define PT_R31 31
120
121#define PT_NIP 32
122#define PT_MSR 33
123#ifdef __KERNEL__
124#define PT_ORIG_R3 34
125#endif
126#define PT_CTR 35
127#define PT_LNK 36
128#define PT_XER 37
129#define PT_CCR 38
130#define PT_MQ 39
131
132#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
133#define PT_FPR31 (PT_FPR0 + 2*31)
134#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
135
136/* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go */
137#define PTRACE_GETVRREGS 18
138#define PTRACE_SETVRREGS 19
139
140/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
141 * spefscr, in one go */
142#define PTRACE_GETEVRREGS 20
143#define PTRACE_SETEVRREGS 21
144
145/*
146 * Get or set a debug register. The first 16 are DABR registers and the
147 * second 16 are IABR registers.
148 */
149#define PTRACE_GET_DEBUGREG 25
150#define PTRACE_SET_DEBUGREG 26
151
152#endif
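
Since the PT_* values are word offsets into struct pt_regs, the correspondence noted in the header comment can be verified at compile time. A hypothetical consistency check (not in the original header):

#include <stddef.h>

/* Compilation fails (negative array size) if a PT_* constant drifts
 * from the struct pt_regs layout above. */
struct pt_regs_offset_check {
	char nip_ok[offsetof(struct pt_regs, nip) / sizeof(unsigned long) == PT_NIP ? 1 : -1];
	char ccr_ok[offsetof(struct pt_regs, ccr) / sizeof(unsigned long) == PT_CCR ? 1 : -1];
	char mq_ok[offsetof(struct pt_regs, mq) / sizeof(unsigned long) == PT_MQ ? 1 : -1];
};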
diff --git a/include/asm-ppc/sigcontext.h b/include/asm-ppc/sigcontext.h
deleted file mode 100644
index b7a417e0a921..000000000000
--- a/include/asm-ppc/sigcontext.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _ASM_PPC_SIGCONTEXT_H
2#define _ASM_PPC_SIGCONTEXT_H
3
4#include <asm/ptrace.h>
5#include <linux/compiler.h>
6
7struct sigcontext {
8 unsigned long _unused[4];
9 int signal;
10 unsigned long handler;
11 unsigned long oldmask;
12 struct pt_regs __user *regs;
13};
14
15#endif
diff --git a/include/asm-ppc/stat.h b/include/asm-ppc/stat.h
deleted file mode 100644
index cadb34298496..000000000000
--- a/include/asm-ppc/stat.h
+++ /dev/null
@@ -1,69 +0,0 @@
1#ifndef _PPC_STAT_H
2#define _PPC_STAT_H
3
4#ifdef __KERNEL__
5#include <linux/types.h>
6#endif /* __KERNEL__ */
7
8struct __old_kernel_stat {
9 unsigned short st_dev;
10 unsigned short st_ino;
11 unsigned short st_mode;
12 unsigned short st_nlink;
13 unsigned short st_uid;
14 unsigned short st_gid;
15 unsigned short st_rdev;
16 unsigned long st_size;
17 unsigned long st_atime;
18 unsigned long st_mtime;
19 unsigned long st_ctime;
20};
21
22#define STAT_HAVE_NSEC 1
23
24struct stat {
25 unsigned st_dev;
26 ino_t st_ino;
27 mode_t st_mode;
28 nlink_t st_nlink;
29 uid_t st_uid;
30 gid_t st_gid;
31 unsigned st_rdev;
32 off_t st_size;
33 unsigned long st_blksize;
34 unsigned long st_blocks;
35 unsigned long st_atime;
36 unsigned long st_atime_nsec;
37 unsigned long st_mtime;
38 unsigned long st_mtime_nsec;
39 unsigned long st_ctime;
40 unsigned long st_ctime_nsec;
41 unsigned long __unused4;
42 unsigned long __unused5;
43};
44
45/* This matches struct stat64 in glibc2.1.
46 */
47struct stat64 {
48 unsigned long long st_dev; /* Device. */
49 unsigned long long st_ino; /* File serial number. */
50 unsigned int st_mode; /* File mode. */
51 unsigned int st_nlink; /* Link count. */
52 unsigned int st_uid; /* User ID of the file's owner. */
53 unsigned int st_gid; /* Group ID of the file's group. */
54 unsigned long long st_rdev; /* Device number, if device. */
55 unsigned short int __pad2;
56 long long st_size; /* Size of file, in bytes. */
57 long st_blksize; /* Optimal block size for I/O. */
58
59 long long st_blocks; /* Number 512-byte blocks allocated. */
60 long st_atime; /* Time of last access. */
61 unsigned long st_atime_nsec;
62 long st_mtime; /* Time of last modification. */
63 unsigned long int st_mtime_nsec;
64 long st_ctime; /* Time of last status change. */
65 unsigned long int st_ctime_nsec;
66 unsigned long int __unused4;
67 unsigned long int __unused5;
68};
69#endif
diff --git a/include/asm-ppc/tlbflush.h b/include/asm-ppc/tlbflush.h
deleted file mode 100644
index 9afee4ffc835..000000000000
--- a/include/asm-ppc/tlbflush.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * include/asm-ppc/tlbflush.h
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifdef __KERNEL__
10#ifndef _PPC_TLBFLUSH_H
11#define _PPC_TLBFLUSH_H
12
13#include <linux/config.h>
14#include <linux/mm.h>
15
16extern void _tlbie(unsigned long address);
17extern void _tlbia(void);
18
19#if defined(CONFIG_4xx)
20
21#ifndef CONFIG_44x
22#define __tlbia() asm volatile ("sync; tlbia; isync" : : : "memory")
23#else
24#define __tlbia _tlbia
25#endif
26
27static inline void flush_tlb_mm(struct mm_struct *mm)
28 { __tlbia(); }
29static inline void flush_tlb_page(struct vm_area_struct *vma,
30 unsigned long vmaddr)
31 { _tlbie(vmaddr); }
32static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
33 unsigned long vmaddr)
34 { _tlbie(vmaddr); }
35static inline void flush_tlb_range(struct vm_area_struct *vma,
36 unsigned long start, unsigned long end)
37 { __tlbia(); }
38static inline void flush_tlb_kernel_range(unsigned long start,
39 unsigned long end)
40 { __tlbia(); }
41
42#elif defined(CONFIG_FSL_BOOKE)
43
44/* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
45 * are best implemented as tlbia vs specific tlbie's */
46
47#define __tlbia() _tlbia()
48
49static inline void flush_tlb_mm(struct mm_struct *mm)
50 { __tlbia(); }
51static inline void flush_tlb_page(struct vm_area_struct *vma,
52 unsigned long vmaddr)
53 { _tlbie(vmaddr); }
54static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
55 unsigned long vmaddr)
56 { _tlbie(vmaddr); }
57static inline void flush_tlb_range(struct vm_area_struct *vma,
58 unsigned long start, unsigned long end)
59 { __tlbia(); }
60static inline void flush_tlb_kernel_range(unsigned long start,
61 unsigned long end)
62 { __tlbia(); }
63
64#elif defined(CONFIG_8xx)
65#define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
66
67static inline void flush_tlb_mm(struct mm_struct *mm)
68 { __tlbia(); }
69static inline void flush_tlb_page(struct vm_area_struct *vma,
70 unsigned long vmaddr)
71 { _tlbie(vmaddr); }
72static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
73 unsigned long vmaddr)
74 { _tlbie(vmaddr); }
75static inline void flush_tlb_range(struct vm_area_struct *vma,
76 unsigned long start, unsigned long end)
77 { __tlbia(); }
78static inline void flush_tlb_kernel_range(unsigned long start,
79 unsigned long end)
80 { __tlbia(); }
81
82#else /* 6xx, 7xx, 7xxx cpus */
83struct mm_struct;
84struct vm_area_struct;
85extern void flush_tlb_mm(struct mm_struct *mm);
86extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
87extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
88extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
89 unsigned long end);
90extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
91#endif
92
93/*
94 * This is called in munmap when we have freed up some page-table
95 * pages. We don't need to do anything here, there's nothing special
96 * about our page-table pages. -- paulus
97 */
98static inline void flush_tlb_pgtables(struct mm_struct *mm,
99 unsigned long start, unsigned long end)
100{
101}
102
103/*
104 * This gets called at the end of handling a page fault, when
105 * the kernel has put a new PTE into the page table for the process.
106 * We use it to ensure coherency between the i-cache and d-cache
107 * for the page which has just been mapped in.
108 * On machines which use an MMU hash table, we use this to put a
109 * corresponding HPTE into the hash table ahead of time, instead of
110 * waiting for the inevitable extra hash-table miss exception.
111 */
112extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
113
114#endif /* _PPC_TLBFLUSH_H */
115#endif /*__KERNEL__ */
diff --git a/include/asm-ppc/uaccess.h b/include/asm-ppc/uaccess.h
deleted file mode 100644
index 63f56224da8c..000000000000
--- a/include/asm-ppc/uaccess.h
+++ /dev/null
@@ -1,393 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _PPC_UACCESS_H
3#define _PPC_UACCESS_H
4
5#ifndef __ASSEMBLY__
6#include <linux/sched.h>
7#include <linux/errno.h>
8#include <asm/processor.h>
9
10#define VERIFY_READ 0
11#define VERIFY_WRITE 1
12
13/*
14 * The fs value determines whether argument validity checking should be
15 * performed or not. If get_fs() == USER_DS, checking is performed; with
16 * get_fs() == KERNEL_DS, checking is bypassed.
17 *
18 * For historical reasons, these macros are grossly misnamed.
19 *
20 * The fs/ds values are now the highest legal address in the "segment".
21 * This simplifies the checking in the routines below.
22 */
23
24#define KERNEL_DS ((mm_segment_t) { ~0UL })
25#define USER_DS ((mm_segment_t) { TASK_SIZE - 1 })
26
27#define get_ds() (KERNEL_DS)
28#define get_fs() (current->thread.fs)
29#define set_fs(val) (current->thread.fs = (val))
30
31#define segment_eq(a,b) ((a).seg == (b).seg)
32
33#define __access_ok(addr,size) \
34 ((addr) <= current->thread.fs.seg \
35 && ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
36
37#define access_ok(type, addr, size) \
38 (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
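
The two comparisons in __access_ok() are written so that neither can wrap: both sides are unsigned, and "size - 1 <= seg - addr" is only evaluated once "addr <= seg" is known to hold. A user-space restatement with invented values:

#include <assert.h>

static int range_ok(unsigned long addr, unsigned long size, unsigned long seg)
{
	return addr <= seg && (size == 0 || size - 1 <= seg - addr);
}

int main(void)
{
	unsigned long seg = 0x7fffffffUL;	/* i.e. TASK_SIZE - 1 */

	assert(range_ok(0x1000UL, 0x100, seg));		/* ordinary user buffer */
	assert(!range_ok(0x7ffffff0UL, 0x100, seg));	/* runs past the segment */
	assert(!range_ok(0xffff0000UL, 4, seg));	/* starts above the segment */
	return 0;
}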
39
40/*
41 * The exception table consists of pairs of addresses: the first is the
42 * address of an instruction that is allowed to fault, and the second is
43 * the address at which the program should continue. No registers are
44 * modified, so it is entirely up to the continuation code to figure out
45 * what to do.
46 *
47 * All the routines below use bits of fixup code that are out of line
48 * with the main instruction path. This means when everything is well,
49 * we don't even have to jump over them. Further, they do not intrude
50 * on our cache or tlb entries.
51 */
52
53struct exception_table_entry
54{
55 unsigned long insn, fixup;
56};
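
At fault time, the trap handler searches this table for the faulting instruction's address and, on a hit, resumes at the fixup address instead of oopsing. A simplified model of that lookup (the real search_exception_tables() in kernel/extable.c binary-searches a sorted table):

static unsigned long fixup_for(const struct exception_table_entry *tbl,
			       int entries, unsigned long faulting_insn)
{
	int i;

	for (i = 0; i < entries; i++)
		if (tbl[i].insn == faulting_insn)
			return tbl[i].fixup;	/* continue here */
	return 0;				/* genuine fault: no fixup */
}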
57
58/*
59 * These are the main single-value transfer routines. They automatically
60 * use the right size if we just have the right pointer type.
61 *
62 * This gets kind of ugly. We want to return _two_ values in "get_user()"
63 * and yet we don't want to do any pointers, because that is too much
64 * of a performance impact. Thus we have a few rather ugly macros here,
65 * and hide all the ugliness from the user.
66 *
67 * The "__xxx" versions of the user access functions are versions that
68 * do not verify the address space, that must have been done previously
69 * with a separate "access_ok()" call (this is used when we do multiple
70 * accesses to the same area of user memory).
71 *
72 * As we use the same address space for kernel and user data on the
73 * PowerPC, we can just do these as direct assignments. (Of course, the
74 * exception handling means that it's no longer "just"...)
75 *
76 * The "user64" versions of the user access functions are versions that
77 * allow access of 64-bit data. The "get_user" functions do not
78 * properly handle 64-bit data because the value gets downcast to a long.
79 * The "put_user" functions already handle 64-bit data properly but we add
80 * "user64" versions for completeness.
81 */
82#define get_user(x,ptr) \
83 __get_user_check((x),(ptr),sizeof(*(ptr)))
84#define get_user64(x,ptr) \
85 __get_user64_check((x),(ptr),sizeof(*(ptr)))
86#define put_user(x,ptr) \
87 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
88#define put_user64(x,ptr) put_user(x,ptr)
89
90#define __get_user(x,ptr) \
91 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
92#define __get_user64(x,ptr) \
93 __get_user64_nocheck((x),(ptr),sizeof(*(ptr)))
94#define __put_user(x,ptr) \
95 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
96#define __put_user64(x,ptr) __put_user(x,ptr)
97
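
Typical use of the checked accessors above, sketched as a hypothetical helper (get_user()/put_user() return 0 on success and -EFAULT on a bad user pointer):

static int double_user_int(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val * 2, uptr);
}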
98extern long __put_user_bad(void);
99
100#define __put_user_nocheck(x,ptr,size) \
101({ \
102 long __pu_err; \
103 __chk_user_ptr(ptr); \
104 __put_user_size((x),(ptr),(size),__pu_err); \
105 __pu_err; \
106})
107
108#define __put_user_check(x,ptr,size) \
109({ \
110 long __pu_err = -EFAULT; \
111 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
112 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
113 __put_user_size((x),__pu_addr,(size),__pu_err); \
114 __pu_err; \
115})
116
117#define __put_user_size(x,ptr,size,retval) \
118do { \
119 retval = 0; \
120 switch (size) { \
121 case 1: \
122 __put_user_asm(x, ptr, retval, "stb"); \
123 break; \
124 case 2: \
125 __put_user_asm(x, ptr, retval, "sth"); \
126 break; \
127 case 4: \
128 __put_user_asm(x, ptr, retval, "stw"); \
129 break; \
130 case 8: \
131 __put_user_asm2(x, ptr, retval); \
132 break; \
133 default: \
134 __put_user_bad(); \
135 } \
136} while (0)
137
138/*
139 * We don't tell gcc that we are accessing memory, but this is OK
140 * because we do not write to any memory gcc knows about, so there
141 * are no aliasing issues.
142 */
143#define __put_user_asm(x, addr, err, op) \
144 __asm__ __volatile__( \
145 "1: "op" %1,0(%2)\n" \
146 "2:\n" \
147 ".section .fixup,\"ax\"\n" \
148 "3: li %0,%3\n" \
149 " b 2b\n" \
150 ".previous\n" \
151 ".section __ex_table,\"a\"\n" \
152 " .align 2\n" \
153 " .long 1b,3b\n" \
154 ".previous" \
155 : "=r" (err) \
156 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
157
158#define __put_user_asm2(x, addr, err) \
159 __asm__ __volatile__( \
160 "1: stw %1,0(%2)\n" \
161 "2: stw %1+1,4(%2)\n" \
162 "3:\n" \
163 ".section .fixup,\"ax\"\n" \
164 "4: li %0,%3\n" \
165 " b 3b\n" \
166 ".previous\n" \
167 ".section __ex_table,\"a\"\n" \
168 " .align 2\n" \
169 " .long 1b,4b\n" \
170 " .long 2b,4b\n" \
171 ".previous" \
172 : "=r" (err) \
173 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
174
175#define __get_user_nocheck(x, ptr, size) \
176({ \
177 long __gu_err; \
178 unsigned long __gu_val; \
179 __chk_user_ptr(ptr); \
180 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
181 (x) = (__typeof__(*(ptr)))__gu_val; \
182 __gu_err; \
183})
184
185#define __get_user64_nocheck(x, ptr, size) \
186({ \
187 long __gu_err; \
188 long long __gu_val; \
189 __chk_user_ptr(ptr); \
190 __get_user_size64(__gu_val, (ptr), (size), __gu_err); \
191 (x) = (__typeof__(*(ptr)))__gu_val; \
192 __gu_err; \
193})
194
195#define __get_user_check(x, ptr, size) \
196({ \
197 long __gu_err = -EFAULT; \
198 unsigned long __gu_val = 0; \
199 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
200 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
201 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
202 (x) = (__typeof__(*(ptr)))__gu_val; \
203 __gu_err; \
204})
205
206#define __get_user64_check(x, ptr, size) \
207({ \
208 long __gu_err = -EFAULT; \
209 long long __gu_val = 0; \
210 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
211 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
212 __get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
213 (x) = (__typeof__(*(ptr)))__gu_val; \
214 __gu_err; \
215})
216
217extern long __get_user_bad(void);
218
219#define __get_user_size(x, ptr, size, retval) \
220do { \
221 retval = 0; \
222 switch (size) { \
223 case 1: \
224 __get_user_asm(x, ptr, retval, "lbz"); \
225 break; \
226 case 2: \
227 __get_user_asm(x, ptr, retval, "lhz"); \
228 break; \
229 case 4: \
230 __get_user_asm(x, ptr, retval, "lwz"); \
231 break; \
232 default: \
233 x = __get_user_bad(); \
234 } \
235} while (0)
236
237#define __get_user_size64(x, ptr, size, retval) \
238do { \
239 retval = 0; \
240 switch (size) { \
241 case 1: \
242 __get_user_asm(x, ptr, retval, "lbz"); \
243 break; \
244 case 2: \
245 __get_user_asm(x, ptr, retval, "lhz"); \
246 break; \
247 case 4: \
248 __get_user_asm(x, ptr, retval, "lwz"); \
249 break; \
250 case 8: \
251 __get_user_asm2(x, ptr, retval); \
252 break; \
253 default: \
254 x = __get_user_bad(); \
255 } \
256} while (0)
257
258#define __get_user_asm(x, addr, err, op) \
259 __asm__ __volatile__( \
260 "1: "op" %1,0(%2)\n" \
261 "2:\n" \
262 ".section .fixup,\"ax\"\n" \
263 "3: li %0,%3\n" \
264 " li %1,0\n" \
265 " b 2b\n" \
266 ".previous\n" \
267 ".section __ex_table,\"a\"\n" \
268 " .align 2\n" \
269 " .long 1b,3b\n" \
270 ".previous" \
271 : "=r"(err), "=r"(x) \
272 : "b"(addr), "i"(-EFAULT), "0"(err))
273
274#define __get_user_asm2(x, addr, err) \
275 __asm__ __volatile__( \
276 "1: lwz %1,0(%2)\n" \
277 "2: lwz %1+1,4(%2)\n" \
278 "3:\n" \
279 ".section .fixup,\"ax\"\n" \
280 "4: li %0,%3\n" \
281 " li %1,0\n" \
282 " li %1+1,0\n" \
283 " b 3b\n" \
284 ".previous\n" \
285 ".section __ex_table,\"a\"\n" \
286 " .align 2\n" \
287 " .long 1b,4b\n" \
288 " .long 2b,4b\n" \
289 ".previous" \
290 : "=r"(err), "=&r"(x) \
291 : "b"(addr), "i"(-EFAULT), "0"(err))
292
293/* more complex routines */
294
295extern int __copy_tofrom_user(void __user *to, const void __user *from,
296 unsigned long size);
297
298extern inline unsigned long
299copy_from_user(void *to, const void __user *from, unsigned long n)
300{
301 unsigned long over;
302
303 if (access_ok(VERIFY_READ, from, n))
304 return __copy_tofrom_user((__force void __user *)to, from, n);
305 if ((unsigned long)from < TASK_SIZE) {
306 over = (unsigned long)from + n - TASK_SIZE;
307 return __copy_tofrom_user((__force void __user *)to, from, n - over) + over;
308 }
309 return n;
310}
311
312extern inline unsigned long
313copy_to_user(void __user *to, const void *from, unsigned long n)
314{
315 unsigned long over;
316
317 if (access_ok(VERIFY_WRITE, to, n))
318 return __copy_tofrom_user(to, (__force void __user *) from, n);
319 if ((unsigned long)to < TASK_SIZE) {
320 over = (unsigned long)to + n - TASK_SIZE;
321 return __copy_tofrom_user(to, (__force void __user *) from, n - over) + over;
322 }
323 return n;
324}
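
When a range starts below TASK_SIZE but runs past it, the "over" arithmetic above copies only the in-bounds part and reports the remainder as uncopied. A host-side model of just that arithmetic (TASK_SIZE_DEMO is invented):

#include <assert.h>

#define TASK_SIZE_DEMO 0x80000000UL

static unsigned long uncopied(unsigned long from, unsigned long n)
{
	if (from + n <= TASK_SIZE_DEMO)
		return 0;			/* whole range is in bounds */
	if (from < TASK_SIZE_DEMO)
		return from + n - TASK_SIZE_DEMO; /* only the tail is rejected */
	return n;				/* nothing usable at all */
}

int main(void)
{
	assert(uncopied(0x1000UL, 0x100) == 0);
	assert(uncopied(0x7ffffff0UL, 0x20) == 0x10);	/* 0x10 copied, 0x10 not */
	assert(uncopied(0x90000000UL, 8) == 8);
	return 0;
}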
325
326static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long size)
327{
328 return __copy_tofrom_user((__force void __user *)to, from, size);
329}
330
331static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long size)
332{
333 return __copy_tofrom_user(to, (__force void __user *)from, size);
334}
335
336#define __copy_to_user_inatomic __copy_to_user
337#define __copy_from_user_inatomic __copy_from_user
338
339extern unsigned long __clear_user(void __user *addr, unsigned long size);
340
341extern inline unsigned long
342clear_user(void __user *addr, unsigned long size)
343{
344 if (access_ok(VERIFY_WRITE, addr, size))
345 return __clear_user(addr, size);
346 if ((unsigned long)addr < TASK_SIZE) {
347 unsigned long over = (unsigned long)addr + size - TASK_SIZE;
348 return __clear_user(addr, size - over) + over;
349 }
350 return size;
351}
352
353extern int __strncpy_from_user(char *dst, const char __user *src, long count);
354
355extern inline long
356strncpy_from_user(char *dst, const char __user *src, long count)
357{
358 if (access_ok(VERIFY_READ, src, 1))
359 return __strncpy_from_user(dst, src, count);
360 return -EFAULT;
361}
362
363/*
364 * Return the size of a string (including the ending 0)
365 *
366 * Return 0 for error
367 */
368
369extern int __strnlen_user(const char __user *str, long len, unsigned long top);
370
371/*
372 * Returns the length of the string at str (including the null byte),
373 * or 0 if we hit a page we can't access,
374 * or something > len if we didn't find a null byte.
375 *
376 * The `top' parameter to __strnlen_user is to make sure that
377 * we can never overflow from the user area into kernel space.
378 */
379extern __inline__ int strnlen_user(const char __user *str, long len)
380{
381 unsigned long top = current->thread.fs.seg;
382
383 if ((unsigned long)str > top)
384 return 0;
385 return __strnlen_user(str, len, top);
386}
387
388#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
389
390#endif /* __ASSEMBLY__ */
391
392#endif /* _PPC_UACCESS_H */
393#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/ucontext.h b/include/asm-ppc/ucontext.h
deleted file mode 100644
index 664bc984d51f..000000000000
--- a/include/asm-ppc/ucontext.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef _ASMPPC_UCONTEXT_H
2#define _ASMPPC_UCONTEXT_H
3
4#include <asm/elf.h>
5#include <asm/signal.h>
6
7struct mcontext {
8 elf_gregset_t mc_gregs;
9 elf_fpregset_t mc_fregs;
10 unsigned long mc_pad[2];
11 elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
12};
13
14struct ucontext {
15 unsigned long uc_flags;
16 struct ucontext __user *uc_link;
17 stack_t uc_stack;
18 int uc_pad[7];
19 struct mcontext __user *uc_regs;/* points to uc_mcontext field */
20 sigset_t uc_sigmask;
21 /* glibc has 1024-bit signal masks, ours are 64-bit */
22 int uc_maskext[30];
23 int uc_pad2[3];
24 struct mcontext uc_mcontext;
25};
26
27#endif /* !_ASMPPC_UCONTEXT_H */
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
deleted file mode 100644
index dbfa42ef4a99..000000000000
--- a/include/asm-ppc64/bitops.h
+++ /dev/null
@@ -1,360 +0,0 @@
1/*
2 * PowerPC64 atomic bit operations.
3 * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner,
4 * Anton Blanchard
5 *
6 * Originally taken from the 32b PPC code. Modified to use 64b values for
7 * the various counters & memory references.
8 *
9 * Bitops are odd when viewed on big-endian systems. They were designed
10 * on little endian so the size of the bitset doesn't matter (low order bytes
11 * come first) as long as the bit in question is valid.
12 *
13 * Bits are "tested" often using the C expression (val & (1<<nr)) so we do
14 * our best to stay compatible with that. The assumption is that val will
15 * be unsigned long for such tests. As such, we assume the bits are stored
16 * as an array of unsigned long (the usual case is a single unsigned long,
17 * of course). Here's an example bitset with bit numbering:
18 *
19 * |63..........0|127........64|191.......128|255.......192|
20 *
21 * This leads to a problem. If an int, short or char is passed as a bitset
22 * it will be a bad memory reference since we want to store in chunks
23 * of unsigned long (64 bits here) size.
24 *
25 * There are a few little-endian macros used mostly for filesystem bitmaps,
26 * these work on similar bit arrays layouts, but byte-oriented:
27 *
28 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
29 *
30 * The main difference is that bits 3-5 in the bit number field need to be
31 * reversed compared to the big-endian bit fields. This can be achieved
32 * by XOR with 0b111000 (0x38).
33 *
34 * This program is free software; you can redistribute it and/or
35 * modify it under the terms of the GNU General Public License
36 * as published by the Free Software Foundation; either version
37 * 2 of the License, or (at your option) any later version.
38 */
39
40#ifndef _PPC64_BITOPS_H
41#define _PPC64_BITOPS_H
42
43#ifdef __KERNEL__
44
45#include <asm/synch.h>
46
47/*
48 * clear_bit doesn't imply a memory barrier
49 */
50#define smp_mb__before_clear_bit() smp_mb()
51#define smp_mb__after_clear_bit() smp_mb()
52
53static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr)
54{
55 return (1UL & (addr[nr >> 6] >> (nr & 63)));
56}
57
58static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
59{
60 unsigned long old;
61 unsigned long mask = 1UL << (nr & 0x3f);
62 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
63
64 __asm__ __volatile__(
65"1: ldarx %0,0,%3 # set_bit\n\
66 or %0,%0,%2\n\
67 stdcx. %0,0,%3\n\
68 bne- 1b"
69 : "=&r" (old), "=m" (*p)
70 : "r" (mask), "r" (p), "m" (*p)
71 : "cc");
72}
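
The ldarx/stdcx. pair is PowerPC's load-linked/store-conditional: the conditional store fails, and the bne- retries, if another CPU touched the reservation granule in between. The same effect, modeled portably with a GCC compare-and-swap loop (a sketch only, not the kernel's implementation):

static inline void set_bit_cas_model(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *p = addr + (nr >> 6);
	unsigned long mask = 1UL << (nr & 0x3f);
	unsigned long old;

	do {
		old = *p;	/* the "ldarx" snapshot */
	} while (!__sync_bool_compare_and_swap(p, old, old | mask));
}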
73
74static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
75{
76 unsigned long old;
77 unsigned long mask = 1UL << (nr & 0x3f);
78 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
79
80 __asm__ __volatile__(
81"1: ldarx %0,0,%3 # clear_bit\n\
82 andc %0,%0,%2\n\
83 stdcx. %0,0,%3\n\
84 bne- 1b"
85 : "=&r" (old), "=m" (*p)
86 : "r" (mask), "r" (p), "m" (*p)
87 : "cc");
88}
89
90static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
91{
92 unsigned long old;
93 unsigned long mask = 1UL << (nr & 0x3f);
94 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
95
96 __asm__ __volatile__(
97"1: ldarx %0,0,%3 # change_bit\n\
98 xor %0,%0,%2\n\
99 stdcx. %0,0,%3\n\
100 bne- 1b"
101 : "=&r" (old), "=m" (*p)
102 : "r" (mask), "r" (p), "m" (*p)
103 : "cc");
104}
105
106static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
107{
108 unsigned long old, t;
109 unsigned long mask = 1UL << (nr & 0x3f);
110 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
111
112 __asm__ __volatile__(
113 EIEIO_ON_SMP
114"1: ldarx %0,0,%3 # test_and_set_bit\n\
115 or %1,%0,%2 \n\
116 stdcx. %1,0,%3 \n\
117 bne- 1b"
118 ISYNC_ON_SMP
119 : "=&r" (old), "=&r" (t)
120 : "r" (mask), "r" (p)
121 : "cc", "memory");
122
123 return (old & mask) != 0;
124}
125
126static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
127{
128 unsigned long old, t;
129 unsigned long mask = 1UL << (nr & 0x3f);
130 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
131
132 __asm__ __volatile__(
133 EIEIO_ON_SMP
134"1: ldarx %0,0,%3 # test_and_clear_bit\n\
135 andc %1,%0,%2\n\
136 stdcx. %1,0,%3\n\
137 bne- 1b"
138 ISYNC_ON_SMP
139 : "=&r" (old), "=&r" (t)
140 : "r" (mask), "r" (p)
141 : "cc", "memory");
142
143 return (old & mask) != 0;
144}
145
146static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
147{
148 unsigned long old, t;
149 unsigned long mask = 1UL << (nr & 0x3f);
150 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
151
152 __asm__ __volatile__(
153 EIEIO_ON_SMP
154"1: ldarx %0,0,%3 # test_and_change_bit\n\
155 xor %1,%0,%2\n\
156 stdcx. %1,0,%3\n\
157 bne- 1b"
158 ISYNC_ON_SMP
159 : "=&r" (old), "=&r" (t)
160 : "r" (mask), "r" (p)
161 : "cc", "memory");
162
163 return (old & mask) != 0;
164}
165
166static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
167{
168 unsigned long old;
169
170 __asm__ __volatile__(
171"1: ldarx %0,0,%3 # set_bit\n\
172 or %0,%0,%2\n\
173 stdcx. %0,0,%3\n\
174 bne- 1b"
175 : "=&r" (old), "=m" (*addr)
176 : "r" (mask), "r" (addr), "m" (*addr)
177 : "cc");
178}
179
180/*
181 * non-atomic versions
182 */
183static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr)
184{
185 unsigned long mask = 1UL << (nr & 0x3f);
186 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
187
188 *p |= mask;
189}
190
191static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr)
192{
193 unsigned long mask = 1UL << (nr & 0x3f);
194 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
195
196 *p &= ~mask;
197}
198
199static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr)
200{
201 unsigned long mask = 1UL << (nr & 0x3f);
202 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
203
204 *p ^= mask;
205}
206
207static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
208{
209 unsigned long mask = 1UL << (nr & 0x3f);
210 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
211 unsigned long old = *p;
212
213 *p = old | mask;
214 return (old & mask) != 0;
215}
216
217static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
218{
219 unsigned long mask = 1UL << (nr & 0x3f);
220 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
221 unsigned long old = *p;
222
223 *p = old & ~mask;
224 return (old & mask) != 0;
225}
226
227static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
228{
229 unsigned long mask = 1UL << (nr & 0x3f);
230 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
231 unsigned long old = *p;
232
233 *p = old ^ mask;
234 return (old & mask) != 0;
235}
236
237/*
238 * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the
239 * most significant (left-most) 1-bit in a double word.
240 */
241static __inline__ int __ilog2(unsigned long x)
242{
243 int lz;
244
245 asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
246 return 63 - lz;
247}
248
249/*
250 * Determines the bit position of the least significant (rightmost) 0 bit
251 * in the specified double word. The returned bit position will be zero-based,
252 * starting from the right side (63 - 0).
253 */
254static __inline__ unsigned long ffz(unsigned long x)
255{
256 /* no zero exists anywhere in the 8 byte area. */
257 if ((x = ~x) == 0)
258 return 64;
259
260 /*
261	 * Calculate the bit position of the least significant '1' bit in x
262	 * (since x has been changed this will actually be the least significant
263	 * '0' bit in the original x). Note: (x & -x) gives us a mask that
264	 * is the least significant (RIGHT-most) 1-bit of the value in x.
265 */
266 return __ilog2(x & -x);
267}
268
269static __inline__ int __ffs(unsigned long x)
270{
271 return __ilog2(x & -x);
272}
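
A host-side check of the (x & -x) identity both helpers rely on: it isolates the least significant set bit, whose index __ilog2() then returns (__builtin_ctzl stands in here for the cntlzd-based computation):

#include <assert.h>

int main(void)
{
	unsigned long x = 0x28UL;	/* bits 3 and 5 set */
	unsigned long y = ~0x8UL;	/* all ones except bit 3 */

	assert((x & -x) == 0x8UL);		/* lowest set bit isolated */
	assert(__builtin_ctzl(x & -x) == 3);	/* so __ffs(x) == 3 */
	assert(__builtin_ctzl(~y & -~y) == 3);	/* and ffz(y) == 3 */
	return 0;
}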
273
274/*
275 * ffs: find first bit set. This is defined the same way as
276 * the libc and compiler builtin ffs routines, therefore
277 * differs in spirit from the above ffz (man ffs).
278 */
279static __inline__ int ffs(int x)
280{
281 unsigned long i = (unsigned long)x;
282 return __ilog2(i & -i) + 1;
283}
284
285/*
286 * fls: find last (most-significant) bit set.
287 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
288 */
289#define fls(x) generic_fls(x)
290
291/*
292 * hweightN: returns the hamming weight (i.e. the number
293 * of bits set) of a N-bit word
294 */
295#define hweight64(x) generic_hweight64(x)
296#define hweight32(x) generic_hweight32(x)
297#define hweight16(x) generic_hweight16(x)
298#define hweight8(x) generic_hweight8(x)
299
300extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
301#define find_first_zero_bit(addr, size) \
302 find_next_zero_bit((addr), (size), 0)
303
304extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
305#define find_first_bit(addr, size) \
306 find_next_bit((addr), (size), 0)
307
308extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
309#define find_first_zero_le_bit(addr, size) \
310 find_next_zero_le_bit((addr), (size), 0)
311
312static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr)
313{
314 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
315 return (ADDR[nr >> 3] >> (nr & 7)) & 1;
316}
317
318#define test_and_clear_le_bit(nr, addr) \
319 test_and_clear_bit((nr) ^ 0x38, (addr))
320#define test_and_set_le_bit(nr, addr) \
321 test_and_set_bit((nr) ^ 0x38, (addr))
322
323/*
324 * non-atomic versions
325 */
326
327#define __set_le_bit(nr, addr) \
328 __set_bit((nr) ^ 0x38, (addr))
329#define __clear_le_bit(nr, addr) \
330 __clear_bit((nr) ^ 0x38, (addr))
331#define __test_and_clear_le_bit(nr, addr) \
332 __test_and_clear_bit((nr) ^ 0x38, (addr))
333#define __test_and_set_le_bit(nr, addr) \
334 __test_and_set_bit((nr) ^ 0x38, (addr))
335
336#define ext2_set_bit(nr,addr) \
337 __test_and_set_le_bit((nr), (unsigned long*)addr)
338#define ext2_clear_bit(nr, addr) \
339 __test_and_clear_le_bit((nr), (unsigned long*)addr)
340
341#define ext2_set_bit_atomic(lock, nr, addr) \
342 test_and_set_le_bit((nr), (unsigned long*)addr)
343#define ext2_clear_bit_atomic(lock, nr, addr) \
344 test_and_clear_le_bit((nr), (unsigned long*)addr)
345
346
347#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
348#define ext2_find_first_zero_bit(addr, size) \
349 find_first_zero_le_bit((unsigned long*)addr, size)
350#define ext2_find_next_zero_bit(addr, size, off) \
351 find_next_zero_le_bit((unsigned long*)addr, size, off)
352
353#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
354#define minix_set_bit(nr,addr) set_bit(nr,addr)
355#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
356#define minix_test_bit(nr,addr) test_bit(nr,addr)
357#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
358
359#endif /* __KERNEL__ */
360#endif /* _PPC64_BITOPS_H */
diff --git a/include/asm-ppc64/dart.h b/include/asm-ppc64/dart.h
deleted file mode 100644
index cdf8a2dec05f..000000000000
--- a/include/asm-ppc64/dart.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_DART_H
20#define _ASM_DART_H
21
22
23/* physical base of DART registers */
24#define DART_BASE 0xf8033000UL
25
26/* Offset from base to control register */
27#define DARTCNTL 0
28/* Offset from base to exception register */
29#define DARTEXCP 0x10
30/* Offset from base to TLB tag registers */
31#define DARTTAG 0x1000
32
33
34/* Control Register fields */
35
36/* base address of table (pfn) */
37#define DARTCNTL_BASE_MASK 0xfffff
38#define DARTCNTL_BASE_SHIFT 12
39
40#define DARTCNTL_FLUSHTLB 0x400
41#define DARTCNTL_ENABLE 0x200
42
43/* size of table in pages */
44#define DARTCNTL_SIZE_MASK 0x1ff
45#define DARTCNTL_SIZE_SHIFT 0
46
47
48/* DART table fields */
49
50#define DARTMAP_VALID 0x80000000
51#define DARTMAP_RPNMASK 0x00ffffff
52
53
54#define DART_PAGE_SHIFT 12
55#define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT)
56#define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT)
57
58
59#endif
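
How the control-register fields above pack together, with invented values (a DART table at pfn 0x12345, 0x100 pages, enabled). The size and enable bits occupy the low 12 bits below the base field, so nothing overlaps:

#include <assert.h>

int main(void)
{
	unsigned int cntl = ((0x12345u & DARTCNTL_BASE_MASK) << DARTCNTL_BASE_SHIFT)
			  | DARTCNTL_ENABLE
			  | ((0x100u & DARTCNTL_SIZE_MASK) << DARTCNTL_SIZE_SHIFT);

	assert(((cntl >> DARTCNTL_BASE_SHIFT) & DARTCNTL_BASE_MASK) == 0x12345);
	assert(((cntl >> DARTCNTL_SIZE_SHIFT) & DARTCNTL_SIZE_MASK) == 0x100);
	assert(cntl & DARTCNTL_ENABLE);
	return 0;
}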
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
index bd7c9532d77b..77fc07c3c6bd 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-ppc64/io.h
@@ -13,7 +13,7 @@
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/byteorder.h> 14#include <asm/byteorder.h>
15#ifdef CONFIG_PPC_ISERIES 15#ifdef CONFIG_PPC_ISERIES
16#include <asm/iSeries/iSeries_io.h> 16#include <asm/iseries/iseries_io.h>
17#endif 17#endif
18#include <asm/synch.h> 18#include <asm/synch.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
diff --git a/include/asm-ppc64/ipcbuf.h b/include/asm-ppc64/ipcbuf.h
deleted file mode 100644
index fa393c8342af..000000000000
--- a/include/asm-ppc64/ipcbuf.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef __PPC64_IPCBUF_H__
2#define __PPC64_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for the PPC is identical to kern_ipc_perm
6 * as we have always had 32-bit UIDs and GIDs in the kernel.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid_t uid;
18 __kernel_gid_t gid;
19 __kernel_uid_t cuid;
20 __kernel_gid_t cgid;
21 __kernel_mode_t mode;
22 unsigned int seq;
23 unsigned int __pad1;
24 unsigned long __unused1;
25 unsigned long __unused2;
26};
27
28#endif /* __PPC64_IPCBUF_H__ */
diff --git a/include/asm-ppc64/kexec.h b/include/asm-ppc64/kexec.h
deleted file mode 100644
index 511908afaeeb..000000000000
--- a/include/asm-ppc64/kexec.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _PPC64_KEXEC_H
2#define _PPC64_KEXEC_H
3
4/*
5 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can
6 * return, i.e. the highest page that is mapped directly into kernel
7 * memory, so that kmap is not required.
8 */
9
10/* Maximum physical address we can use pages from */
11/* XXX: since we copy virt we can use any page we allocate */
12#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
13
14/* Maximum address we can reach in physical address mode */
15/* XXX: I want to allow initrd in highmem. otherwise set to rmo on lpar */
16#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
17
18/* Maximum address we can use for the control code buffer */
19/* XXX: unused today, ppc32 uses TASK_SIZE, probably left over from use_mm */
20#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
21
22/* XXX: today we don't use this at all, although we have a static stack */
23#define KEXEC_CONTROL_CODE_SIZE 4096
24
25/* The native architecture */
26#define KEXEC_ARCH KEXEC_ARCH_PPC64
27
28#define MAX_NOTE_BYTES 1024
29
30#ifndef __ASSEMBLY__
31
32typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
33
34extern note_buf_t crash_notes[];
35
36extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
37 master to copy new code to 0 */
38
39#endif /* __ASSEMBLY__ */
40#endif /* _PPC64_KEXEC_H */
41
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index 77a743402db4..820dd729b895 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -16,21 +16,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-}
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
diff --git a/include/asm-ppc64/naca.h b/include/asm-ppc64/naca.h
deleted file mode 100644
index d2afe6447597..000000000000
--- a/include/asm-ppc64/naca.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _NACA_H
2#define _NACA_H
3
4/*
5 * c 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <asm/types.h>
14
15struct naca_struct {
16 /* Kernel only data - undefined for user space */
17 void *xItVpdAreas; /* VPD Data 0x00 */
18 void *xRamDisk; /* iSeries ramdisk 0x08 */
19 u64 xRamDiskSize; /* In pages 0x10 */
20};
21
22extern struct naca_struct naca;
23
24#endif /* _NACA_H */
diff --git a/include/asm-ppc64/numnodes.h b/include/asm-ppc64/numnodes.h
deleted file mode 100644
index 75ae0b906708..000000000000
--- a/include/asm-ppc64/numnodes.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _ASM_MAX_NUMNODES_H
2#define _ASM_MAX_NUMNODES_H
3
4/* Max 16 Nodes */
5#define NODES_SHIFT 4
6
7#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h
index dfaa21566c9a..def47d720d3d 100644
--- a/include/asm-ppc64/nvram.h
+++ b/include/asm-ppc64/nvram.h
@@ -70,7 +70,7 @@ extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
 
 extern int pSeries_nvram_init(void);
 extern int pmac_nvram_init(void);
-extern int bpa_nvram_init(void);
+extern int mmio_nvram_init(void);
 
 /* PowerMac specific nvram stuffs */
 
diff --git a/include/asm-ppc64/paca.h b/include/asm-ppc64/paca.h
index 2f0f36f73d38..f68fe91debaf 100644
--- a/include/asm-ppc64/paca.h
+++ b/include/asm-ppc64/paca.h
@@ -19,7 +19,7 @@
 #include <linux/config.h>
 #include <asm/types.h>
 #include <asm/lppaca.h>
-#include <asm/iSeries/ItLpRegSave.h>
+#include <asm/iseries/it_lp_reg_save.h>
 #include <asm/mmu.h>
 
 register struct paca_struct *local_paca asm("r13");
diff --git a/include/asm-ppc64/plpar_wrappers.h b/include/asm-ppc64/plpar_wrappers.h
deleted file mode 100644
index 72dd2449ee76..000000000000
--- a/include/asm-ppc64/plpar_wrappers.h
+++ /dev/null
@@ -1,120 +0,0 @@
1#ifndef _PPC64_PLPAR_WRAPPERS_H
2#define _PPC64_PLPAR_WRAPPERS_H
3
4#include <asm/hvcall.h>
5
6static inline long poll_pending(void)
7{
8 unsigned long dummy;
9 return plpar_hcall(H_POLL_PENDING, 0, 0, 0, 0,
10 &dummy, &dummy, &dummy);
11}
12
13static inline long prod_processor(void)
14{
15 plpar_hcall_norets(H_PROD);
16 return(0);
17}
18
19static inline long cede_processor(void)
20{
21 plpar_hcall_norets(H_CEDE);
22 return(0);
23}
24
25static inline long register_vpa(unsigned long flags, unsigned long proc,
26 unsigned long vpa)
27{
28 return plpar_hcall_norets(H_REGISTER_VPA, flags, proc, vpa);
29}
30
31void vpa_init(int cpu);
32
33static inline long plpar_pte_remove(unsigned long flags,
34 unsigned long ptex,
35 unsigned long avpn,
36 unsigned long *old_pteh_ret,
37 unsigned long *old_ptel_ret)
38{
39 unsigned long dummy;
40 return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0,
41 old_pteh_ret, old_ptel_ret, &dummy);
42}
43
44static inline long plpar_pte_read(unsigned long flags,
45 unsigned long ptex,
46 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
47{
48 unsigned long dummy;
49 return plpar_hcall(H_READ, flags, ptex, 0, 0,
50 old_pteh_ret, old_ptel_ret, &dummy);
51}
52
53static inline long plpar_pte_protect(unsigned long flags,
54 unsigned long ptex,
55 unsigned long avpn)
56{
57 return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
58}
59
60static inline long plpar_tce_get(unsigned long liobn,
61 unsigned long ioba,
62 unsigned long *tce_ret)
63{
64 unsigned long dummy;
65 return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0,
66 tce_ret, &dummy, &dummy);
67}
68
69static inline long plpar_tce_put(unsigned long liobn,
70 unsigned long ioba,
71 unsigned long tceval)
72{
73 return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
74}
75
76static inline long plpar_tce_put_indirect(unsigned long liobn,
77 unsigned long ioba,
78 unsigned long page,
79 unsigned long count)
80{
81 return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
82}
83
84static inline long plpar_tce_stuff(unsigned long liobn,
85 unsigned long ioba,
86 unsigned long tceval,
87 unsigned long count)
88{
89 return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
90}
91
92static inline long plpar_get_term_char(unsigned long termno,
93 unsigned long *len_ret,
94 char *buf_ret)
95{
96 unsigned long *lbuf = (unsigned long *)buf_ret; /* ToDo: alignment? */
97 return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0,
98 len_ret, lbuf+0, lbuf+1);
99}
100
101static inline long plpar_put_term_char(unsigned long termno,
102 unsigned long len,
103 const char *buffer)
104{
105 unsigned long *lbuf = (unsigned long *)buffer; /* ToDo: alignment? */
106 return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
107 lbuf[1]);
108}
109
110static inline long plpar_set_xdabr(unsigned long address, unsigned long flags)
111{
112 return plpar_hcall_norets(H_SET_XDABR, address, flags);
113}
114
115static inline long plpar_set_dabr(unsigned long val)
116{
117 return plpar_hcall_norets(H_SET_DABR, val);
118}
119
120#endif /* _PPC64_PLPAR_WRAPPERS_H */
diff --git a/include/asm-ppc64/ppc32.h b/include/asm-ppc64/ppc32.h
deleted file mode 100644
index 3945a55d112a..000000000000
--- a/include/asm-ppc64/ppc32.h
+++ /dev/null
@@ -1,122 +0,0 @@
1#ifndef _PPC64_PPC32_H
2#define _PPC64_PPC32_H
3
4#include <linux/compat.h>
5#include <asm/siginfo.h>
6#include <asm/signal.h>
7
8/*
9 * Data types and macros for providing 32b PowerPC support.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17/* These are here to support 32-bit syscalls on a 64-bit kernel. */
18
19typedef struct compat_siginfo {
20 int si_signo;
21 int si_errno;
22 int si_code;
23
24 union {
25 int _pad[SI_PAD_SIZE32];
26
27 /* kill() */
28 struct {
29 compat_pid_t _pid; /* sender's pid */
30 compat_uid_t _uid; /* sender's uid */
31 } _kill;
32
33 /* POSIX.1b timers */
34 struct {
35 compat_timer_t _tid; /* timer id */
36 int _overrun; /* overrun count */
37 compat_sigval_t _sigval; /* same as below */
38 int _sys_private; /* not to be passed to user */
39 } _timer;
40
41 /* POSIX.1b signals */
42 struct {
43 compat_pid_t _pid; /* sender's pid */
44 compat_uid_t _uid; /* sender's uid */
45 compat_sigval_t _sigval;
46 } _rt;
47
48 /* SIGCHLD */
49 struct {
50 compat_pid_t _pid; /* which child */
51 compat_uid_t _uid; /* sender's uid */
52 int _status; /* exit code */
53 compat_clock_t _utime;
54 compat_clock_t _stime;
55 } _sigchld;
56
57 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
58 struct {
59 unsigned int _addr; /* faulting insn/memory ref. */
60 } _sigfault;
61
62 /* SIGPOLL */
63 struct {
64 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
65 int _fd;
66 } _sigpoll;
67 } _sifields;
68} compat_siginfo_t;
69
70#define __old_sigaction32 old_sigaction32
71
72struct __old_sigaction32 {
73 compat_uptr_t sa_handler;
74 compat_old_sigset_t sa_mask;
75 unsigned int sa_flags;
76 compat_uptr_t sa_restorer; /* not used by Linux/SPARC yet */
77};
78
79
80
81struct sigaction32 {
82 compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */
83 unsigned int sa_flags;
84 compat_uptr_t sa_restorer; /* Another 32 bit pointer */
85 compat_sigset_t sa_mask; /* A 32 bit mask */
86};
87
88typedef struct sigaltstack_32 {
89 unsigned int ss_sp;
90 int ss_flags;
91 compat_size_t ss_size;
92} stack_32_t;
93
94struct sigcontext32 {
95 unsigned int _unused[4];
96 int signal;
97 compat_uptr_t handler;
98 unsigned int oldmask;
99 compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */
100};
101
102struct mcontext32 {
103 elf_gregset_t32 mc_gregs;
104 elf_fpregset_t mc_fregs;
105 unsigned int mc_pad[2];
106 elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
107};
108
109struct ucontext32 {
110 unsigned int uc_flags;
111 unsigned int uc_link;
112 stack_32_t uc_stack;
113 int uc_pad[7];
114 compat_uptr_t uc_regs; /* points to uc_mcontext field */
115 compat_sigset_t uc_sigmask; /* mask last for extensibility */
116 /* glibc has 1024-bit signal masks, ours are 64-bit */
117 int uc_maskext[30];
118 int uc_pad2[3];
119 struct mcontext32 uc_mcontext;
120};
121
122#endif /* _PPC64_PPC32_H */
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
index 14cb895bb607..7d84fb5e39f1 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-ppc64/spinlock.h
@@ -21,7 +21,7 @@
 #include <linux/config.h>
 #include <asm/paca.h>
 #include <asm/hvcall.h>
-#include <asm/iSeries/HvCall.h>
+#include <asm/iseries/hv_call.h>
 
 #define __raw_spin_is_locked(x)	((x)->slock != 0)
 
diff --git a/include/asm-ppc64/stat.h b/include/asm-ppc64/stat.h
deleted file mode 100644
index 973a5f97951d..000000000000
--- a/include/asm-ppc64/stat.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef _PPC64_STAT_H
2#define _PPC64_STAT_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/types.h>
12
13struct stat {
14 unsigned long st_dev;
15 ino_t st_ino;
16 nlink_t st_nlink;
17 mode_t st_mode;
18 uid_t st_uid;
19 gid_t st_gid;
20 unsigned long st_rdev;
21 off_t st_size;
22 unsigned long st_blksize;
23 unsigned long st_blocks;
24 unsigned long st_atime;
25 unsigned long st_atime_nsec;
26 unsigned long st_mtime;
27 unsigned long st_mtime_nsec;
28 unsigned long st_ctime;
29 unsigned long st_ctime_nsec;
30 unsigned long __unused4;
31 unsigned long __unused5;
32 unsigned long __unused6;
33};
34
35#define STAT_HAVE_NSEC 1
36
37/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
38struct stat64 {
39 unsigned long st_dev; /* Device. */
40 unsigned long st_ino; /* File serial number. */
41 unsigned int st_mode; /* File mode. */
42 unsigned int st_nlink; /* Link count. */
43 unsigned int st_uid; /* User ID of the file's owner. */
44 unsigned int st_gid; /* Group ID of the file's group. */
45 unsigned long st_rdev; /* Device number, if device. */
46 unsigned short __pad2;
47 long st_size; /* Size of file, in bytes. */
48 int st_blksize; /* Optimal block size for I/O. */
49
50 long st_blocks; /* Number 512-byte blocks allocated. */
51 int st_atime; /* Time of last access. */
52 int st_atime_nsec;
53 int st_mtime; /* Time of last modification. */
54 int st_mtime_nsec;
55 int st_ctime; /* Time of last status change. */
56 int st_ctime_nsec;
57 unsigned int __unused4;
58 unsigned int __unused5;
59};
60#endif
diff --git a/include/asm-ppc64/tlb.h b/include/asm-ppc64/tlb.h
deleted file mode 100644
index 97cb696ce68d..000000000000
--- a/include/asm-ppc64/tlb.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * TLB shootdown specifics for PPC64
3 *
4 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
5 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#ifndef _PPC64_TLB_H
13#define _PPC64_TLB_H
14
15#include <asm/tlbflush.h>
16
17struct mmu_gather;
18
19extern void pte_free_finish(void);
20
21static inline void tlb_flush(struct mmu_gather *tlb)
22{
23 flush_tlb_pending();
24 pte_free_finish();
25}
26
27/* Avoid pulling in another include just for this */
28#define check_pgt_cache() do { } while (0)
29
30/* Get the generic bits... */
31#include <asm-generic/tlb.h>
32
33/* Nothing needed here in fact... */
34#define tlb_start_vma(tlb, vma) do { } while (0)
35#define tlb_end_vma(tlb, vma) do { } while (0)
36
37#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
38
39#endif /* _PPC64_TLB_H */
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
deleted file mode 100644
index 626f505c6ee3..000000000000
--- a/include/asm-ppc64/tlbflush.h
+++ /dev/null
@@ -1,52 +0,0 @@
#ifndef _PPC64_TLBFLUSH_H
#define _PPC64_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct mm_struct;
struct ppc64_tlb_batch {
	unsigned long index;
	struct mm_struct *mm;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
	unsigned int large;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

static inline void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

#define flush_tlb_mm(mm)			flush_tlb_pending()
#define flush_tlb_page(vma, addr)		flush_tlb_pending()
#define flush_tlb_page_nohash(vma, addr)	do { } while (0)
#define flush_tlb_range(vma, start, end) \
		do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

extern void flush_hash_page(unsigned long va, pte_t pte, int local);
void flush_hash_range(unsigned long number, int local);

#endif /* _PPC64_TLBFLUSH_H */
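The per-CPU batch turns many single-page invalidations into one hash-table flush of up to PPC64_TLB_BATCH_NR entries. A hedged userspace model of just the batching logic (all names here are illustrative, not kernel API), runnable as-is:

#include <stdio.h>

#define BATCH_NR 192		/* mirrors PPC64_TLB_BATCH_NR */

static unsigned long vaddr[BATCH_NR];
static unsigned long batch_index;

static void flush_pending(void)	/* stands in for __flush_tlb_pending() */
{
	if (batch_index)
		printf("flushing %lu entries\n", batch_index);
	batch_index = 0;
}

static void queue_invalidate(unsigned long va)
{
	vaddr[batch_index++] = va;
	if (batch_index == BATCH_NR)	/* batch full: flush eagerly */
		flush_pending();
}

int main(void)
{
	/* queue 400 page invalidations: flushes fire at 192, 384, then 16 */
	for (unsigned long va = 0; va < 400 * 0x1000; va += 0x1000)
		queue_invalidate(va);
	flush_pending();	/* what flush_tlb_mm() boils down to */
	return 0;
}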
diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h
deleted file mode 100644
index 132c1276547b..000000000000
--- a/include/asm-ppc64/uaccess.h
+++ /dev/null
@@ -1,341 +0,0 @@
#ifndef _PPC64_UACCESS_H
#define _PPC64_UACCESS_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0UL)
#define USER_DS		MAKE_MM_SEG(0xf000000000000000UL)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

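This segment switch is what made the classic set_fs() idiom work: code could temporarily widen the "user" segment so the uaccess routines would accept kernel pointers. A hedged sketch of that pattern (kernel context, not standalone; the function name is an assumption for illustration):

static void example_kernel_access(void)
{
	/* widen the segment so access_ok() accepts kernel addresses,
	 * and always restore the old value afterwards */
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* ... call uaccess-based helpers on kernel buffers here ... */
	set_fs(old_fs);
}
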
/*
 * Use the alpha trick for checking ranges:
 *
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * We don't have to check for high bits in (addr+size) because the first
 * two checks force the maximum result to be below the start of the
 * kernel region.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size)) == 0)

#define access_ok(type,addr,size) \
	__access_ok(((__force unsigned long)(addr)),(size),get_fs())
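
A worked instance of the trick, runnable in userspace; the segment value is USER_DS from above, and the two test addresses are arbitrary examples:

#include <stdio.h>

int main(void)
{
	unsigned long seg  = 0xf000000000000000UL;	/* USER_DS.seg */
	unsigned long user = 0x0000010000000000UL;	/* low, user-range */
	unsigned long kern = 0xc000000000000000UL;	/* high, kernel-range */
	unsigned long size = 0x1000;

	/* OR-ing addr and size merges their high bits, so a single AND
	 * against the segment mask catches either operand overflowing */
	printf("user ok: %d\n", (seg & (user | size)) == 0);	/* prints 1 */
	printf("kern ok: %d\n", (seg & (kern | size)) == 0);	/* prints 0 */
	return 0;
}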

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

extern long __put_user_bad(void);

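With the checked variants defined, typical use reads like the following hedged sketch (kernel context; the function name and pointer are illustrative assumptions, not from the patch):

/* Fetch an int from userspace, increment it, store it back.
 * Both macros return 0 on success or -EFAULT on a fault. */
static long example_bump(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val + 1, uptr);
}
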
#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	might_sleep(); \
	__chk_user_ptr(ptr); \
	__put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	void __user *__pu_addr = (ptr); \
	might_sleep(); \
	if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
		__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
	__pu_err; \
})

#define __put_user_size(x,ptr,size,retval,errret) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __put_user_asm(x,ptr,retval,"stb",errret); break; \
	case 2: __put_user_asm(x,ptr,retval,"sth",errret); break; \
	case 4: __put_user_asm(x,ptr,retval,"stw",errret); break; \
	case 8: __put_user_asm(x,ptr,retval,"std",errret); break; \
	default: __put_user_bad(); \
	} \
} while (0)

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op, errret) \
	__asm__ __volatile__( \
		"1:	"op" %1,0(%2)	# put_user\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	li %0,%3\n" \
		"	b 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 3\n" \
		"	.llong 1b,3b\n" \
		".previous" \
		: "=r"(err) \
		: "r"(x), "b"(addr), "i"(errret), "0"(err))

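How the pieces fit together, as an annotation (no new code; this is the label arithmetic the exception-table comment above describes):

/*
 * Annotation: label 1 is the single store that may fault, and the
 * __ex_table entry pairs 1b with 3b.  On a fault the exception handler
 * looks up the faulting address, finds the pair, and resumes at 3,
 * which loads errret into %0 (err) and branches back to 2, so
 * execution continues past the store with only err changed.
 */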

#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err; \
	unsigned long __gu_val; \
	might_sleep(); \
	__get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

#define __get_user_check(x,ptr,size) \
({ \
	long __gu_err = -EFAULT; \
	unsigned long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	might_sleep(); \
	if (access_ok(VERIFY_READ,__gu_addr,size)) \
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval,errret) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __get_user_asm(x,ptr,retval,"lbz",errret); break; \
	case 2: __get_user_asm(x,ptr,retval,"lhz",errret); break; \
	case 4: __get_user_asm(x,ptr,retval,"lwz",errret); break; \
	case 8: __get_user_asm(x,ptr,retval,"ld",errret); break; \
	default: (x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, op, errret) \
	__asm__ __volatile__( \
		"1:	"op" %1,0(%2)	# get_user\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	li %0,%3\n" \
		"	li %1,0\n" \
		"	b 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 3\n" \
		"	.llong 1b,3b\n" \
		".previous" \
		: "=r"(err), "=r"(x) \
		: "b"(addr), "i"(errret), "0"(err))

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

static inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret, 8);
			return ret;
		}
	}
	return __copy_tofrom_user((__force void __user *) to, from, n);
}
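
The __builtin_constant_p() branch is the point of this helper: a compile-time-constant size of 1, 2, 4, or 8 collapses to one inline load rather than a call. A hedged, illustrative caller (the function name and pointer are assumptions):

static int example_read_u64(const u64 __user *uptr, u64 *out)
{
	/* sizeof(*out) is a constant 8, so this inlines a single "ld"
	 * via __get_user_size instead of calling __copy_tofrom_user */
	return __copy_from_user_inatomic(out, uptr, sizeof(*out)) ? -EFAULT : 0;
}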

static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
			return ret;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret, 8);
			return ret;
		}
	}
	return __copy_tofrom_user(to, (__force const void __user *) from, n);
}

static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, n);
}

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long
clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		size = __clear_user(addr, size);
	return size;
}

extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, src, 1)))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 for error
 */
extern int __strnlen_user(const char __user *str, long len);

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 */
static inline int strnlen_user(const char __user *str, long len)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, str, 1)))
		return __strnlen_user(str, len);
	return 0;
}

#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_UACCESS_H */
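Taken together, the checked copy routines give the canonical pattern for a driver's write() method. A hedged sketch, kernel context and illustrative only (the function and buffer names are assumptions):

/* Bound the length, copy, terminate.  copy_from_user() returns the
 * number of bytes NOT copied, so any nonzero result means a fault. */
static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t len, loff_t *ppos)
{
	char kbuf[64];

	if (len >= sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	kbuf[len] = '\0';
	return len;
}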
diff --git a/include/asm-ppc64/ucontext.h b/include/asm-ppc64/ucontext.h
deleted file mode 100644
index ef8cc5b37542..000000000000
--- a/include/asm-ppc64/ucontext.h
+++ /dev/null
@@ -1,22 +0,0 @@
#ifndef _ASMPPC64_UCONTEXT_H
#define _ASMPPC64_UCONTEXT_H

#include <asm/sigcontext.h>

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

struct ucontext {
	unsigned long	uc_flags;
	struct ucontext	*uc_link;
	stack_t		uc_stack;
	sigset_t	uc_sigmask;
	sigset_t	__unsued[15];	/* Allow for uc_sigmask growth */
	struct sigcontext uc_mcontext;	/* last for extensibility */
};

#endif /* _ASMPPC64_UCONTEXT_H */
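Userspace sees this layout as the third argument of an SA_SIGINFO signal handler. A small runnable demo against glibc's <ucontext.h> (demo only; printf is not async-signal-safe):

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *si, void *ctx)
{
	ucontext_t *uc = ctx;

	/* demo only: printf is not async-signal-safe */
	printf("signal %d, uc_flags=%lu\n", sig, (unsigned long)uc->uc_flags);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}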