path: root/include/asm-powerpc
author    Linus Torvalds <torvalds@g5.osdl.org>  2005-11-04 19:27:50 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-04 19:27:50 -0500
commit    602d4a7e2f4b843d1a67375d4d7104073495b758
tree      0b9f184e54fa693c27bd5986c114bdcf6949f788  /include/asm-powerpc
parent    0bbacc402e67abca8794a8401c1621dc0c0202e9
parent    c51e3a417bb0f295e13a5bad86302b5212eafdf3
Merge master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/bitops.h                    | 437
-rw-r--r--  include/asm-powerpc/bug.h                       |  34
-rw-r--r--  include/asm-powerpc/elf.h                       |  22
-rw-r--r--  include/asm-powerpc/futex.h                     |  84
-rw-r--r--  include/asm-powerpc/ioctls.h                    |   3
-rw-r--r--  include/asm-powerpc/ipcbuf.h                    |  34
-rw-r--r--  include/asm-powerpc/irq.h                       |   2
-rw-r--r--  include/asm-powerpc/iseries/hv_call.h           | 113
-rw-r--r--  include/asm-powerpc/iseries/hv_call_event.h     | 253
-rw-r--r--  include/asm-powerpc/iseries/hv_call_sc.h        |  51
-rw-r--r--  include/asm-powerpc/iseries/hv_call_xm.h        |  78
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_config.h      | 138
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_event.h       | 142
-rw-r--r--  include/asm-powerpc/iseries/hv_types.h          | 113
-rw-r--r--  include/asm-powerpc/iseries/iseries_io.h        |  49
-rw-r--r--  include/asm-powerpc/iseries/it_exp_vpd_panel.h  |  52
-rw-r--r--  include/asm-powerpc/iseries/it_lp_naca.h        |  80
-rw-r--r--  include/asm-powerpc/iseries/it_lp_queue.h       |  81
-rw-r--r--  include/asm-powerpc/iseries/it_lp_reg_save.h    |  84
-rw-r--r--  include/asm-powerpc/iseries/lpar_map.h          |  83
-rw-r--r--  include/asm-powerpc/iseries/mf.h                |  57
-rw-r--r--  include/asm-powerpc/iseries/vio.h               | 130
-rw-r--r--  include/asm-powerpc/kexec.h                     |  49
-rw-r--r--  include/asm-powerpc/machdep.h                   |   1
-rw-r--r--  include/asm-powerpc/numnodes.h                  |   7
-rw-r--r--  include/asm-powerpc/ppc_asm.h                   |   7
-rw-r--r--  include/asm-powerpc/processor.h                 |   2
-rw-r--r--  include/asm-powerpc/ptrace.h                    | 248
-rw-r--r--  include/asm-powerpc/rtas.h                      |  25
-rw-r--r--  include/asm-powerpc/sigcontext.h                |  52
-rw-r--r--  include/asm-powerpc/smp.h                       | 119
-rw-r--r--  include/asm-powerpc/sparsemem.h                 |  16
-rw-r--r--  include/asm-powerpc/stat.h                      |  81
-rw-r--r--  include/asm-powerpc/system.h                    |  48
-rw-r--r--  include/asm-powerpc/termios.h                   | 135
-rw-r--r--  include/asm-powerpc/time.h                      |   2
-rw-r--r--  include/asm-powerpc/tlb.h                       |  70
-rw-r--r--  include/asm-powerpc/tlbflush.h                  | 146
-rw-r--r--  include/asm-powerpc/uaccess.h                   | 468
-rw-r--r--  include/asm-powerpc/ucontext.h                  |  40
40 files changed, 3450 insertions, 186 deletions
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 000000000000..dc25c53704d5
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
1/*
2 * PowerPC atomic bit operations.
3 *
4 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
5 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
6 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
7 * originally took it from the ppc32 code.
8 *
9 * Within a word, bits are numbered LSB first. Lots of places make
10 * this assumption by directly testing bits with (val & (1<<nr)).
11 * This can cause confusion for large (> 1 word) bitmaps on a
12 * big-endian system because, unlike little endian, the number of each
13 * bit depends on the word size.
14 *
15 * The bitop functions are defined to work on unsigned longs, so for a
16 * ppc64 system the bits end up numbered:
17 * |63..............0|127............64|191...........128|255...........192|
18 * and on ppc32:
19 * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
20 *
21 * There are a few little-endian macros used mostly for filesystem
22 * bitmaps; these work on a similar bit array layout, but
23 * byte-oriented:
24 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
25 *
26 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
27 * number field needs to be reversed compared to the big-endian bit
28 * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
29 *
30 * This program is free software; you can redistribute it and/or
31 * modify it under the terms of the GNU General Public License
32 * as published by the Free Software Foundation; either version
33 * 2 of the License, or (at your option) any later version.
34 */
35
36#ifndef _ASM_POWERPC_BITOPS_H
37#define _ASM_POWERPC_BITOPS_H
38
39#ifdef __KERNEL__
40
41#include <linux/compiler.h>
42#include <asm/atomic.h>
43#include <asm/synch.h>
44
45/*
46 * clear_bit doesn't imply a memory barrier
47 */
48#define smp_mb__before_clear_bit() smp_mb()
49#define smp_mb__after_clear_bit() smp_mb()
50
51#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
52#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
53#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
54
55#ifdef CONFIG_PPC64
56#define LARXL "ldarx"
57#define STCXL "stdcx."
58#define CNTLZL "cntlzd"
59#else
60#define LARXL "lwarx"
61#define STCXL "stwcx."
62#define CNTLZL "cntlzw"
63#endif
64
65static __inline__ void set_bit(int nr, volatile unsigned long *addr)
66{
67 unsigned long old;
68 unsigned long mask = BITOP_MASK(nr);
69 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
70
71 __asm__ __volatile__(
72"1:" LARXL " %0,0,%3 # set_bit\n"
73 "or %0,%0,%2\n"
74 PPC405_ERR77(0,%3)
75 STCXL " %0,0,%3\n"
76 "bne- 1b"
77 : "=&r"(old), "=m"(*p)
78 : "r"(mask), "r"(p), "m"(*p)
79 : "cc" );
80}
81
82static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
83{
84 unsigned long old;
85 unsigned long mask = BITOP_MASK(nr);
86 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
87
88 __asm__ __volatile__(
89"1:" LARXL " %0,0,%3 # set_bit\n"
90 "andc %0,%0,%2\n"
91 PPC405_ERR77(0,%3)
92 STCXL " %0,0,%3\n"
93 "bne- 1b"
94 : "=&r"(old), "=m"(*p)
95 : "r"(mask), "r"(p), "m"(*p)
96 : "cc" );
97}
98
99static __inline__ void change_bit(int nr, volatile unsigned long *addr)
100{
101 unsigned long old;
102 unsigned long mask = BITOP_MASK(nr);
103 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
104
105 __asm__ __volatile__(
106"1:" LARXL " %0,0,%3 # set_bit\n"
107 "xor %0,%0,%2\n"
108 PPC405_ERR77(0,%3)
109 STCXL " %0,0,%3\n"
110 "bne- 1b"
111 : "=&r"(old), "=m"(*p)
112 : "r"(mask), "r"(p), "m"(*p)
113 : "cc" );
114}
115
116static __inline__ int test_and_set_bit(unsigned long nr,
117 volatile unsigned long *addr)
118{
119 unsigned long old, t;
120 unsigned long mask = BITOP_MASK(nr);
121 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
122
123 __asm__ __volatile__(
124 EIEIO_ON_SMP
125"1:" LARXL " %0,0,%3 # test_and_set_bit\n"
126 "or %1,%0,%2 \n"
127 PPC405_ERR77(0,%3)
128 STCXL " %1,0,%3 \n"
129 "bne- 1b"
130 ISYNC_ON_SMP
131 : "=&r" (old), "=&r" (t)
132 : "r" (mask), "r" (p)
133 : "cc", "memory");
134
135 return (old & mask) != 0;
136}
137
138static __inline__ int test_and_clear_bit(unsigned long nr,
139 volatile unsigned long *addr)
140{
141 unsigned long old, t;
142 unsigned long mask = BITOP_MASK(nr);
143 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
144
145 __asm__ __volatile__(
146 EIEIO_ON_SMP
147"1:" LARXL " %0,0,%3 # test_and_clear_bit\n"
148 "andc %1,%0,%2 \n"
149 PPC405_ERR77(0,%3)
150 STCXL " %1,0,%3 \n"
151 "bne- 1b"
152 ISYNC_ON_SMP
153 : "=&r" (old), "=&r" (t)
154 : "r" (mask), "r" (p)
155 : "cc", "memory");
156
157 return (old & mask) != 0;
158}
159
160static __inline__ int test_and_change_bit(unsigned long nr,
161 volatile unsigned long *addr)
162{
163 unsigned long old, t;
164 unsigned long mask = BITOP_MASK(nr);
165 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
166
167 __asm__ __volatile__(
168 EIEIO_ON_SMP
169"1:" LARXL " %0,0,%3 # test_and_change_bit\n"
170 "xor %1,%0,%2 \n"
171 PPC405_ERR77(0,%3)
172 STCXL " %1,0,%3 \n"
173 "bne- 1b"
174 ISYNC_ON_SMP
175 : "=&r" (old), "=&r" (t)
176 : "r" (mask), "r" (p)
177 : "cc", "memory");
178
179 return (old & mask) != 0;
180}
181
182static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
183{
184 unsigned long old;
185
186 __asm__ __volatile__(
187"1:" LARXL " %0,0,%3 # set_bit\n"
188 "or %0,%0,%2\n"
189 STCXL " %0,0,%3\n"
190 "bne- 1b"
191 : "=&r" (old), "=m" (*addr)
192 : "r" (mask), "r" (addr), "m" (*addr)
193 : "cc");
194}
195
196/* Non-atomic versions */
197static __inline__ int test_bit(unsigned long nr,
198 __const__ volatile unsigned long *addr)
199{
200 return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
201}
202
203static __inline__ void __set_bit(unsigned long nr,
204 volatile unsigned long *addr)
205{
206 unsigned long mask = BITOP_MASK(nr);
207 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
208
209 *p |= mask;
210}
211
212static __inline__ void __clear_bit(unsigned long nr,
213 volatile unsigned long *addr)
214{
215 unsigned long mask = BITOP_MASK(nr);
216 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
217
218 *p &= ~mask;
219}
220
221static __inline__ void __change_bit(unsigned long nr,
222 volatile unsigned long *addr)
223{
224 unsigned long mask = BITOP_MASK(nr);
225 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
226
227 *p ^= mask;
228}
229
230static __inline__ int __test_and_set_bit(unsigned long nr,
231 volatile unsigned long *addr)
232{
233 unsigned long mask = BITOP_MASK(nr);
234 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
235 unsigned long old = *p;
236
237 *p = old | mask;
238 return (old & mask) != 0;
239}
240
241static __inline__ int __test_and_clear_bit(unsigned long nr,
242 volatile unsigned long *addr)
243{
244 unsigned long mask = BITOP_MASK(nr);
245 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
246 unsigned long old = *p;
247
248 *p = old & ~mask;
249 return (old & mask) != 0;
250}
251
252static __inline__ int __test_and_change_bit(unsigned long nr,
253 volatile unsigned long *addr)
254{
255 unsigned long mask = BITOP_MASK(nr);
256 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
257 unsigned long old = *p;
258
259 *p = old ^ mask;
260 return (old & mask) != 0;
261}
262
263/*
264 * Return the zero-based bit position (LE, not IBM bit numbering) of
265 * the most significant 1-bit in a double word.
266 */
267static __inline__ int __ilog2(unsigned long x)
268{
269 int lz;
270
271 asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
272 return BITS_PER_LONG - 1 - lz;
273}
274
275/*
276 * Determines the bit position of the least significant 0 bit in the
277 * specified double word. The returned bit position will be
278 * zero-based, starting from the right side (63/31 - 0).
279 */
280static __inline__ unsigned long ffz(unsigned long x)
281{
282 /* no zero exists anywhere in the 8 byte area. */
283 if ((x = ~x) == 0)
284 return BITS_PER_LONG;
285
286 /*
287 * Calculate the bit position of the least significant '1' bit in x
288 * (since x has been changed, this will actually be the least significant
289 * '0' bit in the original x). Note: (x & -x) gives us a mask that
290 * is the least significant (RIGHT-most) 1-bit of the value in x.
291 */
292 return __ilog2(x & -x);
293}
294
295static __inline__ int __ffs(unsigned long x)
296{
297 return __ilog2(x & -x);
298}
299
300/*
301 * ffs: find first bit set. This is defined the same way as
302 * the libc and compiler builtin ffs routines, therefore
303 * differs in spirit from the above ffz (man ffs).
304 */
305static __inline__ int ffs(int x)
306{
307 unsigned long i = (unsigned long)x;
308 return __ilog2(i & -i) + 1;
309}
310
311/*
312 * fls: find last (most-significant) bit set.
313 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
314 */
315static __inline__ int fls(unsigned int x)
316{
317 int lz;
318
319 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
320 return 32 - lz;
321}
322
323/*
324 * hweightN: returns the hamming weight (i.e. the number
325 * of bits set) of a N-bit word
326 */
327#define hweight64(x) generic_hweight64(x)
328#define hweight32(x) generic_hweight32(x)
329#define hweight16(x) generic_hweight16(x)
330#define hweight8(x) generic_hweight8(x)
331
332#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
333unsigned long find_next_zero_bit(const unsigned long *addr,
334 unsigned long size, unsigned long offset);
335/**
336 * find_first_bit - find the first set bit in a memory region
337 * @addr: The address to start the search at
338 * @size: The maximum size to search
339 *
340 * Returns the bit-number of the first set bit, not the number of the byte
341 * containing a bit.
342 */
343#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
344unsigned long find_next_bit(const unsigned long *addr,
345 unsigned long size, unsigned long offset);
346
347/* Little-endian versions */
348
349static __inline__ int test_le_bit(unsigned long nr,
350 __const__ unsigned long *addr)
351{
352 __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
353 return (tmp[nr >> 3] >> (nr & 7)) & 1;
354}
355
356#define __set_le_bit(nr, addr) \
357 __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
358#define __clear_le_bit(nr, addr) \
359 __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
360
361#define test_and_set_le_bit(nr, addr) \
362 test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
363#define test_and_clear_le_bit(nr, addr) \
364 test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
365
366#define __test_and_set_le_bit(nr, addr) \
367 __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
368#define __test_and_clear_le_bit(nr, addr) \
369 __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
370
371#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0)
372unsigned long find_next_zero_le_bit(const unsigned long *addr,
373 unsigned long size, unsigned long offset);
374
375/* Bitmap functions for the ext2 filesystem */
376
377#define ext2_set_bit(nr,addr) \
378 __test_and_set_le_bit((nr), (unsigned long*)addr)
379#define ext2_clear_bit(nr, addr) \
380 __test_and_clear_le_bit((nr), (unsigned long*)addr)
381
382#define ext2_set_bit_atomic(lock, nr, addr) \
383 test_and_set_le_bit((nr), (unsigned long*)addr)
384#define ext2_clear_bit_atomic(lock, nr, addr) \
385 test_and_clear_le_bit((nr), (unsigned long*)addr)
386
387#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
388
389#define ext2_find_first_zero_bit(addr, size) \
390 find_first_zero_le_bit((unsigned long*)addr, size)
391#define ext2_find_next_zero_bit(addr, size, off) \
392 find_next_zero_le_bit((unsigned long*)addr, size, off)
393
394/* Bitmap functions for the minix filesystem. */
395
396#define minix_test_and_set_bit(nr,addr) \
397 __test_and_set_le_bit(nr, (unsigned long *)addr)
398#define minix_set_bit(nr,addr) \
399 __set_le_bit(nr, (unsigned long *)addr)
400#define minix_test_and_clear_bit(nr,addr) \
401 __test_and_clear_le_bit(nr, (unsigned long *)addr)
402#define minix_test_bit(nr,addr) \
403 test_le_bit(nr, (unsigned long *)addr)
404
405#define minix_find_first_zero_bit(addr,size) \
406 find_first_zero_le_bit((unsigned long *)addr, size)
407
408/*
409 * Every architecture must define this function. It's the fastest
410 * way of searching a 140-bit bitmap where the first 100 bits are
411 * unlikely to be set. It's guaranteed that at least one of the 140
412 * bits is cleared.
413 */
414static inline int sched_find_first_bit(const unsigned long *b)
415{
416#ifdef CONFIG_PPC64
417 if (unlikely(b[0]))
418 return __ffs(b[0]);
419 if (unlikely(b[1]))
420 return __ffs(b[1]) + 64;
421 return __ffs(b[2]) + 128;
422#else
423 if (unlikely(b[0]))
424 return __ffs(b[0]);
425 if (unlikely(b[1]))
426 return __ffs(b[1]) + 32;
427 if (unlikely(b[2]))
428 return __ffs(b[2]) + 64;
429 if (b[3])
430 return __ffs(b[3]) + 96;
431 return __ffs(b[4]) + 128;
432#endif
433}
434
435#endif /* __KERNEL__ */
436
437#endif /* _ASM_POWERPC_BITOPS_H */
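As an aside on the numbering scheme documented at the top of this file: the word/mask split and the little-endian swizzle can be sanity-checked with a small userspace sketch. This is illustrative only, not part of the patch, and assumes a 64-bit unsigned long.

/* Illustrative userspace check of BITOP_WORD/BITOP_MASK and the
 * XOR-with-0x38 little-endian swizzle described in the header comment. */
#include <stdio.h>

#define BITS_PER_LONG		64
#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)	/* 0x38 on 64-bit */

int main(void)
{
	unsigned long nr = 70;	/* bit 6 of the second unsigned long */

	printf("nr=%lu -> word %lu, mask 0x%lx\n",
	       nr, BITOP_WORD(nr), BITOP_MASK(nr));
	printf("byte-oriented (LE) bit number: %lu\n", nr ^ BITOP_LE_SWIZZLE);
	return 0;
}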
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index e4d028e87020..d625ee55f957 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -12,20 +12,16 @@
 #ifndef __ASSEMBLY__
 
 #ifdef __powerpc64__
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
-#define DATA_TYPE long long
+#define BUG_TABLE_ENTRY		".llong"
+#define BUG_TRAP_OP		"tdnei"
 #else
-#define BUG_TABLE_ENTRY(label, line, file, func) \
-	".long " #label ", " #line ", " #file ", " #func "\n"
-#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
-#define DATA_TYPE int
+#define BUG_TABLE_ENTRY		".long"
+#define BUG_TRAP_OP		"twnei"
 #endif /* __powerpc64__ */
 
 struct bug_entry {
 	unsigned long bug_addr;
-	int line;
+	long line;
 	const char *file;
 	const char *function;
 };
@@ -43,29 +39,29 @@ struct bug_entry *find_bug(unsigned long bugaddr);
 #define BUG() do { \
 	__asm__ __volatile__( \
 		"1:	twi 31,0,0\n" \
-		".section __bug_table,\"a\"\n\t" \
-		BUG_TABLE_ENTRY(1b,%0,%1,%2) \
+		".section __bug_table,\"a\"\n" \
+		"\t"BUG_TABLE_ENTRY"	1b,%0,%1,%2\n" \
 		".previous" \
 		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define BUG_ON(x) do { \
 	__asm__ __volatile__( \
-		TRAP_OP(%0,0) \
-		".section __bug_table,\"a\"\n\t" \
-		BUG_TABLE_ENTRY(1b,%1,%2,%3) \
+		"1:	"BUG_TRAP_OP"	%0,0\n" \
+		".section __bug_table,\"a\"\n" \
+		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n" \
 		".previous" \
-		: : "r" ((DATA_TYPE)(x)), "i" (__LINE__), \
+		: : "r" ((long)(x)), "i" (__LINE__), \
 		    "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define WARN_ON(x) do { \
 	__asm__ __volatile__( \
-		TRAP_OP(%0,0) \
-		".section __bug_table,\"a\"\n\t" \
-		BUG_TABLE_ENTRY(1b,%1,%2,%3) \
+		"1:	"BUG_TRAP_OP"	%0,0\n" \
+		".section __bug_table,\"a\"\n" \
+		"\t"BUG_TABLE_ENTRY"	1b,%1,%2,%3\n" \
 		".previous" \
-		: : "r" ((DATA_TYPE)(x)), \
+		: : "r" ((long)(x)), \
 		    "i" (__LINE__ + BUG_WARNING_TRAP), \
 		    "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
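For context, the __bug_table entries emitted by BUG()/BUG_ON()/WARN_ON() above are looked up by the address of the trapping instruction at exception time. A simplified sketch of such a lookup follows; the __start___bug_table/__stop___bug_table boundary symbols are an assumption here, not something defined by this patch.

/* Simplified sketch only: resolve a trapping address to its bug_entry.
 * The linker boundary symbols are assumed, not defined by this patch. */
extern struct bug_entry __start___bug_table[], __stop___bug_table[];

static struct bug_entry *lookup_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; bug++)
		if (bug->bug_addr == bugaddr)
			return bug;	/* gives file, line and function */
	return NULL;
}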
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index d22b10021b5d..d140577d0a05 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -178,18 +178,22 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
 static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
 					  struct pt_regs *regs)
 {
-	int i;
-	int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
+	int i, nregs;
 
-	if (gprs > ELF_NGREG)
-		gprs = ELF_NGREG;
+	memset((void *)elf_regs, 0, sizeof(elf_gregset_t));
 
-	for (i=0; i < gprs; i++)
-		elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
-
-	memset((char *)(elf_regs) + sizeof(struct pt_regs), 0, \
-	       sizeof(elf_gregset_t) - sizeof(struct pt_regs));
+	/* Our registers are always unsigned longs, whether we're a 32 bit
+	 * process or 64 bit, on either a 64 bit or 32 bit kernel.
+	 * Don't use ELF_GREG_TYPE here. */
+	nregs = sizeof(struct pt_regs) / sizeof(unsigned long);
+	if (nregs > ELF_NGREG)
+		nregs = ELF_NGREG;
 
+	for (i = 0; i < nregs; i++) {
+		/* This will correctly truncate 64 bit registers to 32 bits
+		 * for a 32 bit process on a 64 bit kernel. */
+		elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
+	}
 }
 #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
 
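The cast in the copy loop above is what makes a 32-bit core dump on a 64-bit kernel come out right: assigning an unsigned long register value to a 32-bit elf_greg_t keeps only the low 32 bits. A trivial standalone illustration, not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg = 0x00000001deadbeefULL;	/* hypothetical 64-bit GPR value */
	uint32_t greg = (uint32_t)reg;		/* what a 32-bit elf_greg_t stores */

	printf("64-bit 0x%016llx -> 32-bit 0x%08x\n",
	       (unsigned long long)reg, greg);
	return 0;
}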
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
new file mode 100644
index 000000000000..37c94e52ab6d
--- /dev/null
+++ b/include/asm-powerpc/futex.h
@@ -0,0 +1,84 @@
1#ifndef _ASM_POWERPC_FUTEX_H
2#define _ASM_POWERPC_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/synch.h>
9#include <asm/uaccess.h>
10#include <asm/ppc_asm.h>
11
12#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
13 __asm__ __volatile ( \
14 SYNC_ON_SMP \
15"1: lwarx %0,0,%2\n" \
16 insn \
17"2: stwcx. %1,0,%2\n" \
18 "bne- 1b\n" \
19 "li %1,0\n" \
20"3: .section .fixup,\"ax\"\n" \
21"4: li %1,%3\n" \
22 "b 3b\n" \
23 ".previous\n" \
24 ".section __ex_table,\"a\"\n" \
25 ".align 3\n" \
26 DATAL " 1b,4b,2b,4b\n" \
27 ".previous" \
28 : "=&r" (oldval), "=&r" (ret) \
29 : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
30 : "cr0", "memory")
31
32static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
33{
34 int op = (encoded_op >> 28) & 7;
35 int cmp = (encoded_op >> 24) & 15;
36 int oparg = (encoded_op << 8) >> 20;
37 int cmparg = (encoded_op << 20) >> 20;
38 int oldval = 0, ret;
39 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
40 oparg = 1 << oparg;
41
42 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
43 return -EFAULT;
44
45 inc_preempt_count();
46
47 switch (op) {
48 case FUTEX_OP_SET:
49 __futex_atomic_op("", ret, oldval, uaddr, oparg);
50 break;
51 case FUTEX_OP_ADD:
52 __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
53 break;
54 case FUTEX_OP_OR:
55 __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
56 break;
57 case FUTEX_OP_ANDN:
58 __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
59 break;
60 case FUTEX_OP_XOR:
61 __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
62 break;
63 default:
64 ret = -ENOSYS;
65 }
66
67 dec_preempt_count();
68
69 if (!ret) {
70 switch (cmp) {
71 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
72 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
73 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
74 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
75 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
76 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
77 default: ret = -ENOSYS;
78 }
79 }
80 return ret;
81}
82
83#endif /* __KERNEL__ */
84#endif /* _ASM_POWERPC_FUTEX_H */
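The shift arithmetic in futex_atomic_op_inuser() unpacks four fields from one 32-bit encoded_op: bits 31-28 select the operation, 27-24 the comparison, 23-12 the operation argument and 11-0 the comparison argument, with the (x << n) >> 20 idiom sign-extending the two 12-bit fields. A standalone decode for reference, illustrative only:

#include <stdio.h>

int main(void)
{
	int encoded_op = 0x12345678;		/* arbitrary example value */
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* bits 23-12, sign-extended */
	int cmparg = (encoded_op << 20) >> 20;	/* bits 11-0, sign-extended */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}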
diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h
index 5b94ff489b8b..279a6229584b 100644
--- a/include/asm-powerpc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
@@ -62,6 +62,9 @@
 # define TIOCM_DSR	0x100
 # define TIOCM_CD	TIOCM_CAR
 # define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
 
 #define TIOCGSOFTCAR	0x5419
 #define TIOCSSOFTCAR	0x541A
diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h
new file mode 100644
index 000000000000..2c3e1d94db1d
--- /dev/null
+++ b/include/asm-powerpc/ipcbuf.h
@@ -0,0 +1,34 @@
1#ifndef _ASM_POWERPC_IPCBUF_H
2#define _ASM_POWERPC_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for the powerpc is identical to
6 * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
7 * kernel. Note extra padding because this structure is passed back
8 * and forth between kernel and user space. Pad space is left for:
9 * - 1 32-bit value to fill up for 8-byte alignment
10 * - 2 miscellaneous 64-bit values
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#include <linux/types.h>
19
20struct ipc64_perm
21{
22 __kernel_key_t key;
23 __kernel_uid_t uid;
24 __kernel_gid_t gid;
25 __kernel_uid_t cuid;
26 __kernel_gid_t cgid;
27 __kernel_mode_t mode;
28 unsigned int seq;
29 unsigned int __pad1;
30 unsigned long long __unused1;
31 unsigned long long __unused2;
32};
33
34#endif /* _ASM_POWERPC_IPCBUF_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index c7c3f912a3c2..b3935ea28fff 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -73,7 +73,7 @@ extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
 #define IC_INVALID    0
 #define IC_OPEN_PIC   1
 #define IC_PPC_XIC    2
-#define IC_BPA_IIC    3
+#define IC_CELL_PIC   3
 #define IC_ISERIES    4
 
 extern u64 ppc64_interrupt_controller;
diff --git a/include/asm-powerpc/iseries/hv_call.h b/include/asm-powerpc/iseries/hv_call.h
new file mode 100644
index 000000000000..e9f831c9a5e5
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call.h
@@ -0,0 +1,113 @@
1/*
2 * HvCall.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19/*
20 * This file contains the "hypervisor call" interface which is used to
21 * drive the hypervisor from the OS.
22 */
23#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
24#define _ASM_POWERPC_ISERIES_HV_CALL_H
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28#include <asm/paca.h>
29
30/* Type of yield for HvCallBaseYieldProcessor */
31#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */
32#define HvCall_YieldToActive 1 /* Yield until all active procs have run */
33#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */
34
35/* interrupt masks for setEnabledInterrupts */
36#define HvCall_MaskIPI 0x00000001
37#define HvCall_MaskLpEvent 0x00000002
38#define HvCall_MaskLpProd 0x00000004
39#define HvCall_MaskTimeout 0x00000008
40
41/* Log buffer formats */
42#define HvCall_LogBuffer_ASCII 0
43#define HvCall_LogBuffer_EBCDIC 1
44
45#define HvCallBaseAckDeferredInts HvCallBase + 0
46#define HvCallBaseCpmPowerOff HvCallBase + 1
47#define HvCallBaseGetHwPatch HvCallBase + 2
48#define HvCallBaseReIplSpAttn HvCallBase + 3
49#define HvCallBaseSetASR HvCallBase + 4
50#define HvCallBaseSetASRAndRfi HvCallBase + 5
51#define HvCallBaseSetIMR HvCallBase + 6
52#define HvCallBaseSendIPI HvCallBase + 7
53#define HvCallBaseTerminateMachine HvCallBase + 8
54#define HvCallBaseTerminateMachineSrc HvCallBase + 9
55#define HvCallBaseProcessPlicInterrupts HvCallBase + 10
56#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11
57#define HvCallBaseSetVirtualSIT HvCallBase + 12
58#define HvCallBaseVaryOffThisProcessor HvCallBase + 13
59#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14
60#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15
61#define HvCallBaseSendLpProd HvCallBase + 16
62#define HvCallBaseSetEnabledInterrupts HvCallBase + 17
63#define HvCallBaseYieldProcessor HvCallBase + 18
64#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19
65#define HvCallBaseSetVirtualDecr HvCallBase + 20
66#define HvCallBaseClearLogBuffer HvCallBase + 21
67#define HvCallBaseGetLogBufferCodePage HvCallBase + 22
68#define HvCallBaseGetLogBufferFormat HvCallBase + 23
69#define HvCallBaseGetLogBufferLength HvCallBase + 24
70#define HvCallBaseReadLogBuffer HvCallBase + 25
71#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26
72#define HvCallBaseWriteLogBuffer HvCallBase + 27
73#define HvCallBaseRouter28 HvCallBase + 28
74#define HvCallBaseRouter29 HvCallBase + 29
75#define HvCallBaseRouter30 HvCallBase + 30
76#define HvCallBaseSetDebugBus HvCallBase + 31
77
78#define HvCallCcSetDABR HvCallCc + 7
79
80static inline void HvCall_setVirtualDecr(void)
81{
82 /*
83 * Ignore any error return codes - most likely means that the
84 * target value for the LP has been increased and this vary off
85 * would bring us below the new target.
86 */
87 HvCall0(HvCallBaseSetVirtualDecr);
88}
89
90static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm)
91{
92 HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm);
93}
94
95static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts)
96{
97 HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts);
98}
99
100static inline void HvCall_setLogBufferFormatAndCodepage(int format,
101 u32 codePage)
102{
103 HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage);
104}
105
106extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen);
107
108static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
109{
110 HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
111}
112
113#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
diff --git a/include/asm-powerpc/iseries/hv_call_event.h b/include/asm-powerpc/iseries/hv_call_event.h
new file mode 100644
index 000000000000..46763a30590a
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call_event.h
@@ -0,0 +1,253 @@
1/*
2 * HvCallEvent.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19/*
20 * This file contains the "hypervisor call" interface which is used to
21 * drive the hypervisor from the OS.
22 */
23#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
24#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28#include <asm/abs_addr.h>
29
30struct HvLpEvent;
31
32typedef u8 HvLpEvent_Type;
33typedef u8 HvLpEvent_AckInd;
34typedef u8 HvLpEvent_AckType;
35
36struct HvCallEvent_PackedParms {
37 u8 xAckType:1;
38 u8 xAckInd:1;
39 u8 xRsvd:1;
40 u8 xTargetLp:5;
41 u8 xType;
42 u16 xSubtype;
43 HvLpInstanceId xSourceInstId;
44 HvLpInstanceId xTargetInstId;
45};
46
47typedef u8 HvLpDma_Direction;
48typedef u8 HvLpDma_AddressType;
49
50struct HvCallEvent_PackedDmaParms {
51 u8 xDirection:1;
52 u8 xLocalAddrType:1;
53 u8 xRemoteAddrType:1;
54 u8 xRsvd1:5;
55 HvLpIndex xRemoteLp;
56 u8 xType;
57 u8 xRsvd2;
58 HvLpInstanceId xLocalInstId;
59 HvLpInstanceId xRemoteInstId;
60};
61
62typedef u64 HvLpEvent_Rc;
63typedef u64 HvLpDma_Rc;
64
65#define HvCallEventAckLpEvent HvCallEvent + 0
66#define HvCallEventCancelLpEvent HvCallEvent + 1
67#define HvCallEventCloseLpEventPath HvCallEvent + 2
68#define HvCallEventDmaBufList HvCallEvent + 3
69#define HvCallEventDmaSingle HvCallEvent + 4
70#define HvCallEventDmaToSp HvCallEvent + 5
71#define HvCallEventGetOverflowLpEvents HvCallEvent + 6
72#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7
73#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8
74#define HvCallEventOpenLpEventPath HvCallEvent + 9
75#define HvCallEventSetLpEventStack HvCallEvent + 10
76#define HvCallEventSignalLpEvent HvCallEvent + 11
77#define HvCallEventSignalLpEventParms HvCallEvent + 12
78#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13
79#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
80#define HvCallEventRouter15 HvCallEvent + 15
81
82static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
83{
84 HvCall1(HvCallEventGetOverflowLpEvents, queueIndex);
85}
86
87static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
88{
89 HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex);
90}
91
92static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
93 char *eventStackAddr, u32 eventStackSize)
94{
95 u64 abs_addr;
96
97 abs_addr = virt_to_abs(eventStackAddr);
98 HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr,
99 eventStackSize);
100}
101
102static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
103 u16 lpLogicalProcIndex)
104{
105 HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
106 lpLogicalProcIndex);
107}
108
109static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
110{
111 u64 abs_addr;
112
113#ifdef DEBUG_SENDEVENT
114 printk("HvCallEvent_signalLpEvent: *event = %016lx\n ",
115 (unsigned long)event);
116#endif
117 abs_addr = virt_to_abs(event);
118 return HvCall1(HvCallEventSignalLpEvent, abs_addr);
119}
120
121static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
122 HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
123 HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
124 HvLpInstanceId targetInstanceId, u64 correlationToken,
125 u64 eventData1, u64 eventData2, u64 eventData3,
126 u64 eventData4, u64 eventData5)
127{
128 /* Pack the misc bits into a single Dword to pass to PLIC */
129 union {
130 struct HvCallEvent_PackedParms parms;
131 u64 dword;
132 } packed;
133 packed.parms.xAckType = ackType;
134 packed.parms.xAckInd = ackInd;
135 packed.parms.xRsvd = 0;
136 packed.parms.xTargetLp = targetLp;
137 packed.parms.xType = type;
138 packed.parms.xSubtype = subtype;
139 packed.parms.xSourceInstId = sourceInstanceId;
140 packed.parms.xTargetInstId = targetInstanceId;
141
142 return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
143 correlationToken, eventData1, eventData2,
144 eventData3, eventData4, eventData5);
145}
146
147static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
148{
149 u64 abs_addr;
150
151 abs_addr = virt_to_abs(event);
152 return HvCall1(HvCallEventAckLpEvent, abs_addr);
153}
154
155static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
156{
157 u64 abs_addr;
158
159 abs_addr = virt_to_abs(event);
160 return HvCall1(HvCallEventCancelLpEvent, abs_addr);
161}
162
163static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
164 HvLpIndex targetLp, HvLpEvent_Type type)
165{
166 return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
167}
168
169static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
170 HvLpIndex targetLp, HvLpEvent_Type type)
171{
172 return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
173}
174
175static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
176 HvLpEvent_Type type)
177{
178 HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
179}
180
181static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
182 HvLpEvent_Type type)
183{
184 HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
185}
186
187static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
188 HvLpIndex remoteLp, HvLpDma_Direction direction,
189 HvLpInstanceId localInstanceId,
190 HvLpInstanceId remoteInstanceId,
191 HvLpDma_AddressType localAddressType,
192 HvLpDma_AddressType remoteAddressType,
193 /* Do these need to be converted to absolute addresses? */
194 u64 localBufList, u64 remoteBufList, u32 transferLength)
195{
196 /* Pack the misc bits into a single Dword to pass to PLIC */
197 union {
198 struct HvCallEvent_PackedDmaParms parms;
199 u64 dword;
200 } packed;
201
202 packed.parms.xDirection = direction;
203 packed.parms.xLocalAddrType = localAddressType;
204 packed.parms.xRemoteAddrType = remoteAddressType;
205 packed.parms.xRsvd1 = 0;
206 packed.parms.xRemoteLp = remoteLp;
207 packed.parms.xType = type;
208 packed.parms.xRsvd2 = 0;
209 packed.parms.xLocalInstId = localInstanceId;
210 packed.parms.xRemoteInstId = remoteInstanceId;
211
212 return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
213 remoteBufList, transferLength);
214}
215
216static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
217 HvLpIndex remoteLp, HvLpDma_Direction direction,
218 HvLpInstanceId localInstanceId,
219 HvLpInstanceId remoteInstanceId,
220 HvLpDma_AddressType localAddressType,
221 HvLpDma_AddressType remoteAddressType,
222 u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength)
223{
224 /* Pack the misc bits into a single Dword to pass to PLIC */
225 union {
226 struct HvCallEvent_PackedDmaParms parms;
227 u64 dword;
228 } packed;
229
230 packed.parms.xDirection = direction;
231 packed.parms.xLocalAddrType = localAddressType;
232 packed.parms.xRemoteAddrType = remoteAddressType;
233 packed.parms.xRsvd1 = 0;
234 packed.parms.xRemoteLp = remoteLp;
235 packed.parms.xType = type;
236 packed.parms.xRsvd2 = 0;
237 packed.parms.xLocalInstId = localInstanceId;
238 packed.parms.xRemoteInstId = remoteInstanceId;
239
240 return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword,
241 localAddrOrTce, remoteAddrOrTce, transferLength);
242}
243
244static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
245 u32 length, HvLpDma_Direction dir)
246{
247 u64 abs_addr;
248
249 abs_addr = virt_to_abs(local);
250 return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir);
251}
252
253#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
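HvCallEvent_signalLpEventFast() and the two DMA helpers above all use the same trick: fill in a bit-field struct, then read the overlapping u64 out of a union so the whole parameter block travels to PLIC as a single doubleword. The pattern in isolation looks like the sketch below; field placement depends on the big-endian PowerPC ABI the kernel relies on, and the names here are generic, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Same shape as HvCallEvent_PackedParms, with generic names. */
struct packed_parms {
	uint8_t  ack_type:1;
	uint8_t  ack_ind:1;
	uint8_t  rsvd:1;
	uint8_t  target_lp:5;
	uint8_t  type;
	uint16_t subtype;
	uint16_t source_inst;
	uint16_t target_inst;
};

int main(void)
{
	union {
		struct packed_parms parms;
		uint64_t dword;
	} packed = { .parms = { .ack_ind = 1, .target_lp = 3, .type = 7 } };

	/* packed.dword is the single doubleword handed to the hypervisor call. */
	printf("packed dword = 0x%016llx\n", (unsigned long long)packed.dword);
	return 0;
}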
diff --git a/include/asm-powerpc/iseries/hv_call_sc.h b/include/asm-powerpc/iseries/hv_call_sc.h
new file mode 100644
index 000000000000..dec7e9d9ab78
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call_sc.h
@@ -0,0 +1,51 @@
1/*
2 * HvCallSc.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
20#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
21
22#include <linux/types.h>
23
24#define HvCallBase 0x8000000000000000ul
25#define HvCallCc 0x8001000000000000ul
26#define HvCallCfg 0x8002000000000000ul
27#define HvCallEvent 0x8003000000000000ul
28#define HvCallHpt 0x8004000000000000ul
29#define HvCallPci 0x8005000000000000ul
30#define HvCallSm 0x8007000000000000ul
31#define HvCallXm 0x8009000000000000ul
32
33extern u64 HvCall0(u64);
34extern u64 HvCall1(u64, u64);
35extern u64 HvCall2(u64, u64, u64);
36extern u64 HvCall3(u64, u64, u64, u64);
37extern u64 HvCall4(u64, u64, u64, u64, u64);
38extern u64 HvCall5(u64, u64, u64, u64, u64, u64);
39extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64);
40extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64);
41
42extern u64 HvCall0Ret16(u64, void *);
43extern u64 HvCall1Ret16(u64, void *, u64);
44extern u64 HvCall2Ret16(u64, void *, u64, u64);
45extern u64 HvCall3Ret16(u64, void *, u64, u64, u64);
46extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64);
47extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
48extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
49extern u64 HvCall7Ret16(u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64);
50
51#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
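Together with the HvCallBase*/HvCallEvent* function-number macros in the headers above, these component bases define the 64-bit token handed to HvCall0..HvCall7: token = component base + function number. A one-line arithmetic illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long HvCallEvent = 0x8003000000000000ull;
	/* HvCallEventSignalLpEvent is defined above as HvCallEvent + 11 */
	unsigned long long token = HvCallEvent + 11;

	printf("HvCallEventSignalLpEvent token = 0x%016llx\n", token);
	return 0;
}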
diff --git a/include/asm-powerpc/iseries/hv_call_xm.h b/include/asm-powerpc/iseries/hv_call_xm.h
new file mode 100644
index 000000000000..ca9202cb01ed
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call_xm.h
@@ -0,0 +1,78 @@
1/*
2 * This file contains the "hypervisor call" interface which is used to
3 * drive the hypervisor from SLIC.
4 */
5#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
6#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
7
8#include <asm/iseries/hv_call_sc.h>
9#include <asm/iseries/hv_types.h>
10
11#define HvCallXmGetTceTableParms HvCallXm + 0
12#define HvCallXmTestBus HvCallXm + 1
13#define HvCallXmConnectBusUnit HvCallXm + 2
14#define HvCallXmLoadTod HvCallXm + 8
15#define HvCallXmTestBusUnit HvCallXm + 9
16#define HvCallXmSetTce HvCallXm + 11
17#define HvCallXmSetTces HvCallXm + 13
18
19/*
20 * Structure passed to HvCallXm_getTceTableParms
21 */
22struct iommu_table_cb {
23 unsigned long itc_busno; /* Bus number for this tce table */
24 unsigned long itc_start; /* Will be NULL for secondary */
25 unsigned long itc_totalsize; /* Size (in pages) of whole table */
26 unsigned long itc_offset; /* Index into real tce table of the
27 start of our section */
28 unsigned long itc_size; /* Size (in pages) of our section */
29 unsigned long itc_index; /* Index of this tce table */
30 unsigned short itc_maxtables; /* Max num of tables for partition */
31 unsigned char itc_virtbus; /* Flag to indicate virtual bus */
32 unsigned char itc_slotno; /* IOA Tce Slot Index */
33 unsigned char itc_rsvd[4];
34};
35
36static inline void HvCallXm_getTceTableParms(u64 cb)
37{
38 HvCall1(HvCallXmGetTceTableParms, cb);
39}
40
41static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce)
42{
43 return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce);
44}
45
46static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset,
47 u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4)
48{
49 return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces,
50 tce1, tce2, tce3, tce4);
51}
52
53static inline u64 HvCallXm_testBus(u16 busNumber)
54{
55 return HvCall1(HvCallXmTestBus, busNumber);
56}
57
58static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber,
59 u8 deviceId)
60{
61 return HvCall2(HvCallXmTestBusUnit, busNumber,
62 (subBusNumber << 8) | deviceId);
63}
64
65static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber,
66 u8 deviceId, u64 interruptToken)
67{
68 return HvCall5(HvCallXmConnectBusUnit, busNumber,
69 (subBusNumber << 8) | deviceId, interruptToken, 0,
70 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */);
71}
72
73static inline u64 HvCallXm_loadTod(void)
74{
75 return HvCall0(HvCallXmLoadTod);
76}
77
78#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/include/asm-powerpc/iseries/hv_lp_config.h b/include/asm-powerpc/iseries/hv_lp_config.h
new file mode 100644
index 000000000000..bc00f036bca0
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_lp_config.h
@@ -0,0 +1,138 @@
1/*
2 * HvLpConfig.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
20#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
21
22/*
23 * This file contains the interface to the LPAR configuration data
24 * to determine which resources should be allocated to each partition.
25 */
26
27#include <asm/iseries/hv_call_sc.h>
28#include <asm/iseries/hv_types.h>
29#include <asm/iseries/it_lp_naca.h>
30
31enum {
32 HvCallCfg_Cur = 0,
33 HvCallCfg_Init = 1,
34 HvCallCfg_Max = 2,
35 HvCallCfg_Min = 3
36};
37
38#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6
39#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7
40#define HvCallCfgGetMsChunks HvCallCfg + 9
41#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20
42#define HvCallCfgGetSharedProcUnits HvCallCfg + 21
43#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22
44#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30
45#define HvCallCfgGetHostingLpIndex HvCallCfg + 32
46
47extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
48
49static inline HvLpIndex HvLpConfig_getLpIndex(void)
50{
51 return itLpNaca.xLpIndex;
52}
53
54static inline HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
55{
56 return itLpNaca.xPrimaryLpIndex;
57}
58
59static inline u64 HvLpConfig_getMsChunks(void)
60{
61 return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(),
62 HvCallCfg_Cur);
63}
64
65static inline u64 HvLpConfig_getSystemPhysicalProcessors(void)
66{
67 return HvCall0(HvCallCfgGetSystemPhysicalProcessors);
68}
69
70static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
71{
72 return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI);
73}
74
75static inline u64 HvLpConfig_getPhysicalProcessors(void)
76{
77 return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
78 HvCallCfg_Cur);
79}
80
81static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void)
82{
83 return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex());
84}
85
86static inline u64 HvLpConfig_getSharedProcUnits(void)
87{
88 return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
89 HvCallCfg_Cur);
90}
91
92static inline u64 HvLpConfig_getMaxSharedProcUnits(void)
93{
94 return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
95 HvCallCfg_Max);
96}
97
98static inline u64 HvLpConfig_getMaxPhysicalProcessors(void)
99{
100 return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
101 HvCallCfg_Max);
102}
103
104static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(
105 HvLpIndex lp)
106{
107 /*
108 * This is a new function in V5R1 so calls to this on older
109 * hypervisors will return -1
110 */
111 u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp);
112 if (retVal == -1)
113 retVal = 0;
114 return retVal;
115}
116
117static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void)
118{
119 return HvLpConfig_getVirtualLanIndexMapForLp(
120 HvLpConfig_getLpIndex_outline());
121}
122
123static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1,
124 HvLpIndex lp2)
125{
126 HvLpVirtualLanIndexMap virtualLanIndexMap1 =
127 HvLpConfig_getVirtualLanIndexMapForLp(lp1);
128 HvLpVirtualLanIndexMap virtualLanIndexMap2 =
129 HvLpConfig_getVirtualLanIndexMapForLp(lp2);
130 return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0);
131}
132
133static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
134{
135 return HvCall1(HvCallCfgGetHostingLpIndex, lp);
136}
137
138#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
diff --git a/include/asm-powerpc/iseries/hv_lp_event.h b/include/asm-powerpc/iseries/hv_lp_event.h
new file mode 100644
index 000000000000..499ab1ad0185
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_lp_event.h
@@ -0,0 +1,142 @@
1/*
2 * HvLpEvent.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20/* This file contains the class for HV events in the system. */
21
22#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
23#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
24
25#include <asm/types.h>
26#include <asm/ptrace.h>
27#include <asm/iseries/hv_types.h>
28#include <asm/iseries/hv_call_event.h>
29
30/*
31 * HvLpEvent is the structure for Lp Event messages passed between
32 * partitions through PLIC.
33 */
34
35struct HvEventFlags {
36 u8 xValid:1; /* Indicates a valid request x00-x00 */
37 u8 xRsvd1:4; /* Reserved ... */
38 u8 xAckType:1; /* Immediate or deferred ... */
39 u8 xAckInd:1; /* Indicates if ACK required ... */
40 u8 xFunction:1; /* Interrupt or Acknowledge ... */
41};
42
43
44struct HvLpEvent {
45 struct HvEventFlags xFlags; /* Event flags x00-x00 */
46 u8 xType; /* Type of message x01-x01 */
47 u16 xSubtype; /* Subtype for event x02-x03 */
48 u8 xSourceLp; /* Source LP x04-x04 */
49 u8 xTargetLp; /* Target LP x05-x05 */
50 u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */
51 u8 xRc; /* RC for Ack flows x07-x07 */
52 u16 xSourceInstanceId; /* Source sides instance id x08-x09 */
53 u16 xTargetInstanceId; /* Target sides instance id x0A-x0B */
54 union {
55 u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */
56 u16 xSubtypeDataShort[2]; /* Data as 2 shorts */
57 u8 xSubtypeDataChar[4]; /* Data as 4 chars */
58 } x;
59
60 u64 xCorrelationToken; /* Unique value for source/type x10-x17 */
61};
62
63typedef void (*LpEventHandler)(struct HvLpEvent *, struct pt_regs *);
64
65/* Register a handler for an event type - returns 0 on success */
66extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType,
67 LpEventHandler hdlr);
68
69/*
70 * Unregister a handler for an event type
71 *
72 * This call will sleep until the handler being removed is guaranteed to
73 * be no longer executing on any CPU. Do not call with locks held.
74 *
75 * returns 0 on success
76 * Unregister will fail if there are any paths open for the type
77 */
78extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType);
79
80/*
81 * Open an Lp Event Path for an event type
82 * returns 0 on success
83 * openPath will fail if there is no handler registered for the event type.
84 * The lpIndex specified is the partition index for the target partition
85 * (for VirtualIo, VirtualLan and SessionMgr; other types specify zero)
86 */
87extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
88
89/*
90 * Close an Lp Event Path for a type and partition
91 * returns 0 on success
92 */
93extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
94
95#define HvLpEvent_Type_Hypervisor 0
96#define HvLpEvent_Type_MachineFac 1
97#define HvLpEvent_Type_SessionMgr 2
98#define HvLpEvent_Type_SpdIo 3
99#define HvLpEvent_Type_VirtualBus 4
100#define HvLpEvent_Type_PciIo 5
101#define HvLpEvent_Type_RioIo 6
102#define HvLpEvent_Type_VirtualLan 7
103#define HvLpEvent_Type_VirtualIo 8
104#define HvLpEvent_Type_NumTypes 9
105
106#define HvLpEvent_Rc_Good 0
107#define HvLpEvent_Rc_BufferNotAvailable 1
108#define HvLpEvent_Rc_Cancelled 2
109#define HvLpEvent_Rc_GenericError 3
110#define HvLpEvent_Rc_InvalidAddress 4
111#define HvLpEvent_Rc_InvalidPartition 5
112#define HvLpEvent_Rc_InvalidSize 6
113#define HvLpEvent_Rc_InvalidSubtype 7
114#define HvLpEvent_Rc_InvalidSubtypeData 8
115#define HvLpEvent_Rc_InvalidType 9
116#define HvLpEvent_Rc_PartitionDead 10
117#define HvLpEvent_Rc_PathClosed 11
118#define HvLpEvent_Rc_SubtypeError 12
119
120#define HvLpEvent_Function_Ack 0
121#define HvLpEvent_Function_Int 1
122
123#define HvLpEvent_AckInd_NoAck 0
124#define HvLpEvent_AckInd_DoAck 1
125
126#define HvLpEvent_AckType_ImmediateAck 0
127#define HvLpEvent_AckType_DeferredAck 1
128
129#define HvLpDma_Direction_LocalToRemote 0
130#define HvLpDma_Direction_RemoteToLocal 1
131
132#define HvLpDma_AddressType_TceIndex 0
133#define HvLpDma_AddressType_RealAddress 1
134
135#define HvLpDma_Rc_Good 0
136#define HvLpDma_Rc_Error 1
137#define HvLpDma_Rc_PartitionDead 2
138#define HvLpDma_Rc_PathClosed 3
139#define HvLpDma_Rc_InvalidAddress 4
140#define HvLpDma_Rc_InvalidLength 5
141
142#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
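The comments above imply a usage order for this interface: register a handler for the event type first, then open a path to the target partition, and tear things down in the reverse order (close the path, then unregister). A hedged usage sketch follows; my_handler, my_open and my_close are placeholders and not part of this patch.

/* Usage sketch; assumes <asm/iseries/hv_lp_event.h> above is included. */
static void my_handler(struct HvLpEvent *event, struct pt_regs *regs)
{
	/* process the event; send an ack if event->xFlags.xAckInd asked for one */
}

static int my_open(HvLpIndex target_lp)
{
	int rc;

	rc = HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, my_handler);
	if (rc)
		return rc;

	rc = HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, target_lp);
	if (rc)
		HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualIo);
	return rc;
}

static void my_close(HvLpIndex target_lp)
{
	HvLpEvent_closePath(HvLpEvent_Type_VirtualIo, target_lp);
	HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualIo);
}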
diff --git a/include/asm-powerpc/iseries/hv_types.h b/include/asm-powerpc/iseries/hv_types.h
new file mode 100644
index 000000000000..c38f7e3d01dc
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_types.h
@@ -0,0 +1,113 @@
1/*
2 * HvTypes.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
20#define _ASM_POWERPC_ISERIES_HV_TYPES_H
21
22/*
23 * General typedefs for the hypervisor.
24 */
25
26#include <asm/types.h>
27
28typedef u8 HvLpIndex;
29typedef u16 HvLpInstanceId;
30typedef u64 HvLpTOD;
31typedef u64 HvLpSystemSerialNum;
32typedef u8 HvLpDeviceSerialNum[12];
33typedef u16 HvLpSanHwSet;
34typedef u16 HvLpBus;
35typedef u16 HvLpBoard;
36typedef u16 HvLpCard;
37typedef u8 HvLpDeviceType[4];
38typedef u8 HvLpDeviceModel[3];
39typedef u64 HvIoToken;
40typedef u8 HvLpName[8];
41typedef u32 HvIoId;
42typedef u64 HvRealMemoryIndex;
43typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */
44typedef u16 HvLpVrmIndex;
45typedef u32 HvXmGenerationId;
46typedef u8 HvLpBusPool;
47typedef u8 HvLpSharedPoolIndex;
48typedef u16 HvLpSharedProcUnitsX100;
49typedef u8 HvLpVirtualLanIndex;
50typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */
51typedef u16 HvBusNumber; /* Hypervisor Bus Number */
52typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */
53typedef u8 HvAgentId; /* Hypervisor DevFn */
54
55
56#define HVMAXARCHITECTEDLPS 32
57#define HVMAXARCHITECTEDVIRTUALLANS 16
58#define HVMAXARCHITECTEDVIRTUALDISKS 32
59#define HVMAXARCHITECTEDVIRTUALCDROMS 8
60#define HVMAXARCHITECTEDVIRTUALTAPES 8
61#define HVCHUNKSIZE (256 * 1024)
62#define HVPAGESIZE (4 * 1024)
63#define HVLPMINMEGSPRIMARY 256
64#define HVLPMINMEGSSECONDARY 64
65#define HVCHUNKSPERMEG 4
66#define HVPAGESPERMEG 256
67#define HVPAGESPERCHUNK 64
68
69#define HvLpIndexInvalid ((HvLpIndex)0xff)
70
71/*
72 * Enums for the sub-components under PLIC
73 * Used in HvCall and HvPrimaryCall
74 */
75enum {
76 HvCallCompId = 0,
77 HvCallCpuCtlsCompId = 1,
78 HvCallCfgCompId = 2,
79 HvCallEventCompId = 3,
80 HvCallHptCompId = 4,
81 HvCallPciCompId = 5,
82 HvCallSlmCompId = 6,
83 HvCallSmCompId = 7,
84 HvCallSpdCompId = 8,
85 HvCallXmCompId = 9,
86 HvCallRioCompId = 10,
87 HvCallRsvd3CompId = 11,
88 HvCallRsvd2CompId = 12,
89 HvCallRsvd1CompId = 13,
90 HvCallMaxCompId = 14,
91 HvPrimaryCallCompId = 0,
92 HvPrimaryCallCfgCompId = 1,
93 HvPrimaryCallPciCompId = 2,
94 HvPrimaryCallSmCompId = 3,
95 HvPrimaryCallSpdCompId = 4,
96 HvPrimaryCallXmCompId = 5,
97 HvPrimaryCallRioCompId = 6,
98 HvPrimaryCallRsvd7CompId = 7,
99 HvPrimaryCallRsvd6CompId = 8,
100 HvPrimaryCallRsvd5CompId = 9,
101 HvPrimaryCallRsvd4CompId = 10,
102 HvPrimaryCallRsvd3CompId = 11,
103 HvPrimaryCallRsvd2CompId = 12,
104 HvPrimaryCallRsvd1CompId = 13,
105 HvPrimaryCallMaxCompId = HvCallMaxCompId
106};
107
108struct HvLpBufferList {
109 u64 addr;
110 u64 len;
111};
112
113#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
diff --git a/include/asm-powerpc/iseries/iseries_io.h b/include/asm-powerpc/iseries/iseries_io.h
new file mode 100644
index 000000000000..56b2113ff0f5
--- /dev/null
+++ b/include/asm-powerpc/iseries/iseries_io.h
@@ -0,0 +1,49 @@
1#ifndef _ASM_POWERPC_ISERIES_ISERIES_IO_H
2#define _ASM_POWERPC_ISERIES_ISERIES_IO_H
3
4#include <linux/config.h>
5
6#ifdef CONFIG_PPC_ISERIES
7#include <linux/types.h>
8/*
9 * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000.
10 *
11 * Remaps the io.h for the iSeries Io
12 * Copyright (C) 2000 Allan H Trautman, IBM Corporation
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the:
26 * Free Software Foundation, Inc.,
27 * 59 Temple Place, Suite 330,
28 * Boston, MA 02111-1307 USA
29 *
30 * Change Activity:
31 * Created December 28, 2000
32 * End Change Activity
33 */
34
35extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress);
36extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress);
37extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress);
38extern void iSeries_Write_Byte(u8 IoData, volatile void __iomem * IoAddress);
39extern void iSeries_Write_Word(u16 IoData, volatile void __iomem * IoAddress);
40extern void iSeries_Write_Long(u32 IoData, volatile void __iomem * IoAddress);
41
42extern void iSeries_memset_io(volatile void __iomem *dest, char x, size_t n);
43extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source,
44 size_t n);
45extern void iSeries_memcpy_fromio(void *dest,
46 const volatile void __iomem *source, size_t n);
47
48#endif /* CONFIG_PPC_ISERIES */
49#endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
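
[These accessors mirror the generic readb/writeb family but route MMIO through the hypervisor on legacy iSeries PCI. A hedged usage sketch; the register semantics and the example_ function name are illustrative, not part of this header:

	/* Sketch: read a byte-wide status register and, assuming a
	 * write-one-to-clear convention, acknowledge it.  `regs` is an
	 * __iomem cookie obtained from the iSeries PCI code. */
	static void example_ack_status(volatile void __iomem *regs)
	{
		u8 status = iSeries_Read_Byte(regs);

		if (status & 0x01)
			iSeries_Write_Byte(0x01, regs);
	}
]
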
diff --git a/include/asm-powerpc/iseries/it_exp_vpd_panel.h b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
new file mode 100644
index 000000000000..66a17a230c52
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
@@ -0,0 +1,52 @@
1/*
2 * ItExtVpdPanel.h
3 * Copyright (C) 2002 Dave Boutcher IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
20#define _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
21
22/*
23 * This struct maps the panel information
24 *
25 * Warning:
26 * This data must match the architecture for the panel information
27 */
28
29#include <asm/types.h>
30
31struct ItExtVpdPanel {
32 /* Definition of the Extended Vpd On Panel Data Area */
33 char systemSerial[8];
34 char mfgID[4];
35 char reserved1[24];
36 char machineType[4];
37 char systemID[6];
38 char somUniqueCnt[4];
39 char serialNumberCount;
40 char reserved2[7];
41 u16 bbu3;
42 u16 bbu2;
43 u16 bbu1;
44 char xLocationLabel[8];
45 u8 xRsvd1[6];
46 u16 xFrameId;
47 u8 xRsvd2[48];
48};
49
50extern struct ItExtVpdPanel xItExtVpdPanel;
51
52#endif /* _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H */
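
[xItExtVpdPanel is a single global instance populated from the panel VPD; the character fields are fixed-width and not NUL-terminated. A sketch of copying the serial number out safely (assumes <linux/kernel.h> for snprintf; the example_ helper is illustrative):

	/* Sketch: format the 8-character, non-terminated serial field. */
	static void example_show_serial(char *buf, size_t len)
	{
		snprintf(buf, len, "%.8s", xItExtVpdPanel.systemSerial);
	}
]
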
diff --git a/include/asm-powerpc/iseries/it_lp_naca.h b/include/asm-powerpc/iseries/it_lp_naca.h
new file mode 100644
index 000000000000..c3ef1de45d82
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_lp_naca.h
@@ -0,0 +1,80 @@
1/*
2 * ItLpNaca.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_IT_LP_NACA_H
20#define _ASM_POWERPC_ISERIES_IT_LP_NACA_H
21
22#include <linux/types.h>
23
24/*
25 * This control block contains the data that is shared between the
26 * hypervisor (PLIC) and the OS.
27 */
28
29struct ItLpNaca {
30// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
31 u32 xDesc; // Eye catcher x00-x03
32 u16 xSize; // Size of this class x04-x05
33 u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
34 u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
35 u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
 36 u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A
37 u8 xLpIndex; // LP Index x0B-x0B
38 u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
39 u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
40 u8 xPirEnvironMode:8; // Piranha or hardware x10-x10
41 u8 xPirConsoleMode:8; // Piranha console indicator x11-x11
42 u8 xPirDasdMode:8; // Piranha dasd indicator x12-x12
43 u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
44 u8 xLparInstalled:1; // Is LPAR installed on system x18-x1F
45 u8 xSysPartitioned:1; // Is the system partitioned ...
46 u8 xHwSyncedTBs:1; // Hardware synced TBs ...
47 u8 xIntProcUtilHmt:1; // Utilize HMT for interrupts ...
48 u8 xRsvd1_1:4; // Reserved ...
49 u8 xSpVpdFormat:8; // VPD areas are in CSP format ...
50 u8 xIntProcRatio:8; // Ratio of int procs to procs ...
51 u8 xRsvd1_2[5]; // Reserved ...
52 u16 xRsvd1_3; // Reserved x20-x21
53 u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
54 u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
55 u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
56 u64 xLoadAreaAddr; // ER address of load area x28-x2F
57 u32 xLoadAreaChunks; // Chunks for the load area x30-x33
58 u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
59 // doing an ASR switch on PASE
60 // system call.
61 u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
62 u8 xRsvd1_4[64]; // x40-x7F
63
64// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
65 u8 xRsvd2_0[128]; // Reserved x00-x7F
66
67// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
 68// NB: Padding required to keep xInterruptHdlr at x300 which is required
69// for v4r4 PLIC.
70 u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
71 u8 xRsvd3_0[384]; // Reserved 180-2FF
72
73// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
74// handlers
75 u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
76};
77
78extern struct ItLpNaca itLpNaca;
79
80#endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */
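
[The first cache line of ItLpNaca is read-only partitioning state shared with PLIC. A minimal sketch of reading a few of those flags via the global instance (assumes <linux/kernel.h> for printk; the example_ helper is illustrative):

	/* Sketch: report basic LPAR state from the shared ItLpNaca block. */
	static void example_report_lpar_state(void)
	{
		if (itLpNaca.xLparInstalled && itLpNaca.xSysPartitioned)
			printk(KERN_INFO "LP index %d (primary %d)\n",
			       itLpNaca.xLpIndex, itLpNaca.xPrimaryLpIndex);
	}
]
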
diff --git a/include/asm-powerpc/iseries/it_lp_queue.h b/include/asm-powerpc/iseries/it_lp_queue.h
new file mode 100644
index 000000000000..a60d03afbf95
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_lp_queue.h
@@ -0,0 +1,81 @@
1/*
2 * ItLpQueue.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
20#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
21
22/*
23 * This control block defines the simple LP queue structure that is
24 * shared between the hypervisor (PLIC) and the OS in order to send
25 * events to an LP.
26 */
27
28#include <asm/types.h>
29#include <asm/ptrace.h>
30
31struct HvLpEvent;
32
33#define ITMaxLpQueues 8
34
35#define NotUsed 0 // Queue will not be used by PLIC
36#define DedicatedIo 1 // Queue dedicated to IO processor specified
37#define DedicatedLp 2 // Queue dedicated to LP specified
38#define Shared 3 // Queue shared for both IO and LP
39
40#define LpEventStackSize 4096
41#define LpEventMaxSize 256
42#define LpEventAlign 64
43
44struct hvlpevent_queue {
45/*
46 * The xSlicCurEventPtr is the pointer to the next event stack entry
47 * that will become valid. The OS must peek at this entry to determine
48 * if it is valid. PLIC will set the valid indicator as the very last
49 * store into that entry.
50 *
51 * When the OS has completed processing of the event then it will mark
52 * the event as invalid so that PLIC knows it can store into that event
53 * location again.
54 *
55 * If the event stack fills and there are overflow events, then PLIC
56 * will set the xPlicOverflowIntPending flag in which case the OS will
 57 * have to fetch the additional LP events once it has drained the
58 * event stack.
59 *
60 * The first 16-bytes are known by both the OS and PLIC. The remainder
61 * of the cache line is for use by the OS.
62 */
63 u8 xPlicOverflowIntPending;// 0x00 Overflow events are pending
64 u8 xPlicStatus; // 0x01 DedicatedIo or DedicatedLp or NotUsed
65 u16 xSlicLogicalProcIndex; // 0x02 Logical Proc Index for correlation
66 u8 xPlicRsvd[12]; // 0x04
67 char *xSlicCurEventPtr; // 0x10
68 char *xSlicLastValidEventPtr; // 0x18
69 char *xSlicEventStackPtr; // 0x20
70 u8 xIndex; // 0x28 unique sequential index.
71 u8 xSlicRsvd[3]; // 0x29-2b
72 spinlock_t lock;
73};
74
75extern struct hvlpevent_queue hvlpevent_queue;
76
77extern int hvlpevent_is_pending(void);
78extern void process_hvlpevents(struct pt_regs *);
79extern void setup_hvlpevent_queue(void);
80
81#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
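
[The comment in hvlpevent_queue describes a producer/consumer handshake: PLIC marks an entry valid as its last store, the OS processes the entry and marks it invalid, and the overflow flag tells the OS to fetch extra events once the stack drains. A sketch of the consuming side, using only the helpers declared in this header (the example_ wrapper is illustrative):

	/* Sketch: drain pending LP events from an interrupt path; `regs`
	 * comes from the caller's interrupt frame. */
	static void example_drain_lp_events(struct pt_regs *regs)
	{
		if (hvlpevent_is_pending())
			process_hvlpevents(regs);
	}
]
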
diff --git a/include/asm-powerpc/iseries/it_lp_reg_save.h b/include/asm-powerpc/iseries/it_lp_reg_save.h
new file mode 100644
index 000000000000..288044b702de
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_lp_reg_save.h
@@ -0,0 +1,84 @@
1/*
2 * ItLpRegSave.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
20#define _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
21
22/*
23 * This control block contains the data that is shared between PLIC
24 * and the OS
25 */
26
27struct ItLpRegSave {
28 u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
29 u16 xSize; // Size of this class 004-005
 30 u8 xInUse; // Area is live 006-006
31 u8 xRsvd1[9]; // Reserved 007-00F
32
33 u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F
34 u32 xCTRL; // Control Register 170-173
35 u32 xDEC; // Decrementer 174-177
36 u32 xFPSCR; // FP Status and Control Reg 178-17B
37 u32 xPVR; // Processor Version Number 17C-17F
38
39 u64 xMMCR0; // Monitor Mode Control Reg 0 180-187
40 u32 xPMC1; // Perf Monitor Counter 1 188-18B
41 u32 xPMC2; // Perf Monitor Counter 2 18C-18F
42 u32 xPMC3; // Perf Monitor Counter 3 190-193
43 u32 xPMC4; // Perf Monitor Counter 4 194-197
44 u32 xPIR; // Processor ID Reg 198-19B
45
46 u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F
47 u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3
48 u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7
49 u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB
50 u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF
51 u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3
52 u32 xTSC; // Thread Switch Control 1B4-1B7
53 u32 xTST; // Thread Switch Timeout 1B8-1BB
54 u32 xRsvd; // Reserved 1BC-1BF
55
56 u64 xACCR; // Address Compare Control Reg 1C0-1C7
57 u64 xIMR; // Instruction Match Register 1C8-1CF
58 u64 xSDR1; // Storage Description Reg 1 1D0-1D7
59 u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF
60 u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7
61 u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF
62 u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7
63 u64 xTB; // Time Base Register 1F8-1FF
64
65 u64 xFPR[32]; // Floating Point Registers 200-2FF
66
67 u64 xMSR; // Machine State Register 300-307
68 u64 xNIA; // Next Instruction Address 308-30F
69
70 u64 xDABR; // Data Address Breakpoint Reg 310-317
71 u64 xIABR; // Inst Address Breakpoint Reg 318-31F
72
73 u64 xHID0; // HW Implementation Dependent0 320-327
74
75 u64 xHID4; // HW Implementation Dependent4 328-32F
 76 u64 xSCOMd; // SCOM Data Reg (SPRG4) 330-337
 77 u64 xSCOMc; // SCOM Command Reg (SPRG5) 338-33F
78 u64 xSDAR; // Sample Data Address Register 340-347
79 u64 xSIAR; // Sample Inst Address Register 348-34F
80
81 u8 xRsvd3[176]; // Reserved 350-3FF
82};
83
 84#endif /* _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H */
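
[The hexadecimal offsets in the comments above are part of the PLIC interface, so a compile-time check keeps the C layout honest. A sketch with a sample of the documented offsets (assumes <linux/kernel.h>/<linux/stddef.h> for BUILD_BUG_ON and offsetof; the helper name is illustrative):

	/* Sketch: verify a few documented offsets at compile time. */
	static inline void example_reg_save_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct ItLpRegSave, xFPR) != 0x200);
		BUILD_BUG_ON(offsetof(struct ItLpRegSave, xMSR) != 0x300);
		BUILD_BUG_ON(sizeof(struct ItLpRegSave) != 0x400);
	}
]
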
diff --git a/include/asm-powerpc/iseries/lpar_map.h b/include/asm-powerpc/iseries/lpar_map.h
new file mode 100644
index 000000000000..84fc321615bf
--- /dev/null
+++ b/include/asm-powerpc/iseries/lpar_map.h
@@ -0,0 +1,83 @@
1/*
2 * LparMap.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
20#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
21
22#ifndef __ASSEMBLY__
23
24#include <asm/types.h>
25
26/*
27 * The iSeries hypervisor will set up mapping for one or more
28 * ESID/VSID pairs (in SLB/segment registers) and will set up
29 * mappings of one or more ranges of pages to VAs.
30 * We will have the hypervisor set up the ESID->VSID mapping
31 * for the four kernel segments (C-F). With shared processors,
32 * the hypervisor will clear all segment registers and reload
33 * these four whenever the processor is switched from one
34 * partition to another.
35 */
36
37/* The Vsid and Esid identified below will be used by the hypervisor
38 * to set up a memory mapping for part of the load area before giving
39 * control to the Linux kernel. The load area is 64 MB, but this must
40 * not attempt to map the whole load area. The Hashed Page Table may
41 * need to be located within the load area (if the total partition size
42 * is 64 MB), but cannot be mapped. Typically, this should specify
43 * to map half (32 MB) of the load area.
44 *
45 * The hypervisor will set up page table entries for the number of
46 * pages specified.
47 *
48 * In 32-bit mode, the hypervisor will load all four of the
49 * segment registers (identified by the low-order four bits of the
 50 * Esid field). In 64-bit mode, the hypervisor will load one SLB
51 * entry to map the Esid to the Vsid.
52*/
53
54#define HvEsidsToMap 2
55#define HvRangesToMap 1
56
57/* Hypervisor initially maps 32MB of the load area */
58#define HvPagesToMap 8192
59
60struct LparMap {
61 u64 xNumberEsids; // Number of ESID/VSID pairs
62 u64 xNumberRanges; // Number of VA ranges to map
63 u64 xSegmentTableOffs; // Page number within load area of seg table
64 u64 xRsvd[5];
65 struct {
66 u64 xKernelEsid; // Esid used to map kernel load
67 u64 xKernelVsid; // Vsid used to map kernel load
68 } xEsids[HvEsidsToMap];
69 struct {
70 u64 xPages; // Number of pages to be mapped
71 u64 xOffset; // Offset from start of load area
72 u64 xVPN; // Virtual Page Number
73 } xRanges[HvRangesToMap];
74};
75
76extern const struct LparMap xLparMap;
77
78#endif /* __ASSEMBLY__ */
79
80/* the fixed address where the LparMap exists */
81#define LPARMAP_PHYS 0x7000
82
83#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
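
[HvPagesToMap follows from the comment above: half of the 64 MB load area is 32 MB, and 32 MB / 4 KB = 8192 pages. A sketch of what an instance of the map looks like; the ESID/VSID values below are placeholders only, and the kernel's real xLparMap is defined with actual values in the early boot code:

	/* Sketch: illustrative initializer, placeholder ESID/VSID values. */
	static const struct LparMap example_map = {
		.xNumberEsids  = HvEsidsToMap,
		.xNumberRanges = HvRangesToMap,
		.xEsids  = { { .xKernelEsid = 0, .xKernelVsid = 0 } },
		.xRanges = { { .xPages  = HvPagesToMap,	/* 8192 x 4 KB = 32 MB */
			       .xOffset = 0,
			       .xVPN    = 0 } },
	};
]
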
diff --git a/include/asm-powerpc/iseries/mf.h b/include/asm-powerpc/iseries/mf.h
new file mode 100644
index 000000000000..e7bd57a03fb1
--- /dev/null
+++ b/include/asm-powerpc/iseries/mf.h
@@ -0,0 +1,57 @@
1/*
2 * mf.h
3 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
4 * Copyright (C) 2004 Stephen Rothwell IBM Corporation
5 *
 6 * This module exists as an interface between a Linux secondary partition
7 * running on an iSeries and the primary partition's Virtual Service
8 * Processor (VSP) object. The VSP has final authority over powering on/off
9 * all partitions in the iSeries. It also provides miscellaneous low-level
10 * machine facility type operations.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26#ifndef _ASM_POWERPC_ISERIES_MF_H
27#define _ASM_POWERPC_ISERIES_MF_H
28
29#include <linux/types.h>
30
31#include <asm/iseries/hv_types.h>
32#include <asm/iseries/hv_call_event.h>
33
34struct rtc_time;
35
36typedef void (*MFCompleteHandler)(void *clientToken, int returnCode);
37
38extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
39 unsigned size, unsigned amount, MFCompleteHandler hdlr,
40 void *userToken);
41extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
42 unsigned count, MFCompleteHandler hdlr, void *userToken);
43
44extern void mf_power_off(void);
45extern void mf_reboot(void);
46
47extern void mf_display_src(u32 word);
48extern void mf_display_progress(u16 value);
49extern void mf_clear_src(void);
50
51extern void mf_init(void);
52
53extern int mf_get_rtc(struct rtc_time *tm);
54extern int mf_get_boot_rtc(struct rtc_time *tm);
55extern int mf_set_rtc(struct rtc_time *tm);
56
57#endif /* _ASM_POWERPC_ISERIES_MF_H */
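
[The allocation calls above take a completion callback, while the RTC helpers are synchronous wrappers around requests to the primary partition's VSP. A hedged usage sketch (assumes <linux/rtc.h> for struct rtc_time and <linux/kernel.h> for printk; the example_ helper is illustrative):

	/* Sketch: read the RTC through the primary partition's VSP. */
	static void example_read_rtc(void)
	{
		struct rtc_time tm;

		if (mf_get_rtc(&tm) == 0)
			printk(KERN_INFO "iSeries RTC: %04d-%02d-%02d %02d:%02d:%02d\n",
			       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			       tm.tm_hour, tm.tm_min, tm.tm_sec);
	}
]
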
diff --git a/include/asm-powerpc/iseries/vio.h b/include/asm-powerpc/iseries/vio.h
new file mode 100644
index 000000000000..7e3a469420dd
--- /dev/null
+++ b/include/asm-powerpc/iseries/vio.h
@@ -0,0 +1,130 @@
1/* -*- linux-c -*-
2 * drivers/char/vio.h
3 *
4 * iSeries Virtual I/O Message Path header
5 *
6 * Authors: Dave Boutcher <boutcher@us.ibm.com>
7 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com>
9 *
10 * (C) Copyright 2000 IBM Corporation
11 *
12 * This header file is used by the iSeries virtual I/O device
13 * drivers. It defines the interfaces to the common functions
 14 * (implemented in drivers/char/viopath.c) as well as defining
15 * common functions and structures. Currently (at the time I
16 * wrote this comment) the iSeries virtual I/O device drivers
17 * that use this are
18 * drivers/block/viodasd.c
19 * drivers/char/viocons.c
20 * drivers/char/viotape.c
21 * drivers/cdrom/viocd.c
22 *
23 * The iSeries virtual ethernet support (veth.c) uses a whole
24 * different set of functions.
25 *
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License as
28 * published by the Free Software Foundation; either version 2 of the
 29 * License, or (at your option) any later version.
30 *
31 * This program is distributed in the hope that it will be useful, but
32 * WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
34 * General Public License for more details.
35 *
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software Foundation,
38 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 *
40 */
41#ifndef _ASM_POWERPC_ISERIES_VIO_H
42#define _ASM_POWERPC_ISERIES_VIO_H
43
44#include <asm/iseries/hv_types.h>
45#include <asm/iseries/hv_lp_event.h>
46
47/*
48 * iSeries virtual I/O events use the subtype field in
49 * HvLpEvent to figure out what kind of vio event is coming
50 * in. We use a table to route these, and this defines
51 * the maximum number of distinct subtypes
52 */
53#define VIO_MAX_SUBTYPES 8
54
55/*
56 * Each subtype can register a handler to process their events.
57 * The handler must have this interface.
58 */
59typedef void (vio_event_handler_t) (struct HvLpEvent * event);
60
61extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
62extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
63extern int vio_setHandler(int subtype, vio_event_handler_t * beh);
64extern int vio_clearHandler(int subtype);
65extern int viopath_isactive(HvLpIndex lp);
66extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
67extern HvLpInstanceId viopath_targetinst(HvLpIndex lp);
68extern void vio_set_hostlp(void);
69extern void *vio_get_event_buffer(int subtype);
70extern void vio_free_event_buffer(int subtype, void *buffer);
71
72extern HvLpIndex viopath_hostLp;
73extern HvLpIndex viopath_ourLp;
74
75#define VIOCHAR_MAX_DATA 200
76
77#define VIOMAJOR_SUBTYPE_MASK 0xff00
78#define VIOMINOR_SUBTYPE_MASK 0x00ff
79#define VIOMAJOR_SUBTYPE_SHIFT 8
80
81#define VIOVERSION 0x0101
82
83/*
84 * This is the general structure for VIO errors; each module should have
85 * a table of them, and each table should be terminated by an entry of
86 * { 0, 0, NULL }. Then, to find a specific error message, a module
87 * should pass its local table and the return code.
88 */
89struct vio_error_entry {
90 u16 rc;
91 int errno;
92 const char *msg;
93};
94extern const struct vio_error_entry *vio_lookup_rc(
95 const struct vio_error_entry *local_table, u16 rc);
96
97enum viosubtypes {
98 viomajorsubtype_monitor = 0x0100,
99 viomajorsubtype_blockio = 0x0200,
100 viomajorsubtype_chario = 0x0300,
101 viomajorsubtype_config = 0x0400,
102 viomajorsubtype_cdio = 0x0500,
103 viomajorsubtype_tape = 0x0600,
104 viomajorsubtype_scsi = 0x0700
105};
106
107enum vioconfigsubtype {
108 vioconfigget = 0x0001,
109};
110
111enum viorc {
112 viorc_good = 0x0000,
113 viorc_noConnection = 0x0001,
114 viorc_noReceiver = 0x0002,
115 viorc_noBufferAvailable = 0x0003,
116 viorc_invalidMessageType = 0x0004,
117 viorc_invalidRange = 0x0201,
118 viorc_invalidToken = 0x0202,
119 viorc_DMAError = 0x0203,
120 viorc_useError = 0x0204,
121 viorc_releaseError = 0x0205,
122 viorc_invalidDisk = 0x0206,
123 viorc_openRejected = 0x0301
124};
125
126struct device;
127
128extern struct device *iSeries_vio_dev;
129
130#endif /* _ASM_POWERPC_ISERIES_VIO_H */
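
[Per the convention described above, each driver keeps a { rc, errno, msg } array terminated by { 0, 0, NULL } and translates hypervisor return codes with vio_lookup_rc. A hedged sketch (assumes <linux/errno.h>; the table contents, the NULL-check behaviour, and the example_ names are illustrative — the real lookup may instead return a catch-all entry):

	/* Sketch: per-driver error table and return-code translation. */
	static const struct vio_error_entry example_err_table[] = {
		{ viorc_noConnection, EIO,    "no connection to partition" },
		{ viorc_invalidRange, EINVAL, "invalid range" },
		{ 0x0000, 0, NULL }	/* terminator */
	};

	static int example_rc_to_errno(u16 rc)
	{
		const struct vio_error_entry *e =
			vio_lookup_rc(example_err_table, rc);

		return e ? -e->errno : -EIO;
	}
]
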
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
new file mode 100644
index 000000000000..062ab9ba68eb
--- /dev/null
+++ b/include/asm-powerpc/kexec.h
@@ -0,0 +1,49 @@
1#ifndef _ASM_POWERPC_KEXEC_H
2#define _ASM_POWERPC_KEXEC_H
3
4/*
5 * Maximum page that is mapped directly into kernel memory.
6 * XXX: Since we copy virt we can use any page we allocate
7 */
8#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
9
10/*
11 * Maximum address we can reach in physical address mode.
12 * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
13 */
14#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
15
16/* Maximum address we can use for the control code buffer */
17#ifdef __powerpc64__
18#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
19#else
20/* TASK_SIZE, probably left over from use_mm ?? */
21#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
22#endif
23
24#define KEXEC_CONTROL_CODE_SIZE 4096
25
26/* The native architecture */
27#ifdef __powerpc64__
28#define KEXEC_ARCH KEXEC_ARCH_PPC64
29#else
30#define KEXEC_ARCH KEXEC_ARCH_PPC
31#endif
32
33#ifndef __ASSEMBLY__
34
35#define MAX_NOTE_BYTES 1024
36typedef u32 note_buf_t[MAX_NOTE_BYTES / sizeof(u32)];
37
38extern note_buf_t crash_notes[];
39
40#ifdef __powerpc64__
41extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
42 master to copy new code to 0 */
43#else
44struct kimage;
45extern void machine_kexec_simple(struct kimage *image);
46#endif
47
48#endif /* ! __ASSEMBLY__ */
49#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 451b345cfc78..629ca964b974 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -80,6 +80,7 @@ struct machdep_calls {
80 void (*iommu_dev_setup)(struct pci_dev *dev); 80 void (*iommu_dev_setup)(struct pci_dev *dev);
81 void (*iommu_bus_setup)(struct pci_bus *bus); 81 void (*iommu_bus_setup)(struct pci_bus *bus);
82 void (*irq_bus_setup)(struct pci_bus *bus); 82 void (*irq_bus_setup)(struct pci_bus *bus);
83 int (*set_dabr)(unsigned long dabr);
83#endif 84#endif
84 85
85 int (*probe)(int platform); 86 int (*probe)(int platform);
diff --git a/include/asm-powerpc/numnodes.h b/include/asm-powerpc/numnodes.h
new file mode 100644
index 000000000000..795533aca095
--- /dev/null
+++ b/include/asm-powerpc/numnodes.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_POWERPC_MAX_NUMNODES_H
2#define _ASM_POWERPC_MAX_NUMNODES_H
3
4/* Max 16 Nodes */
5#define NODES_SHIFT 4
6
7#endif /* _ASM_POWERPC_MAX_NUMNODES_H */
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index f99f2af82ca5..c534ca41224b 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -506,6 +506,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
506#else 506#else
507 #define __ASM_CONST(x) x##UL 507 #define __ASM_CONST(x) x##UL
508 #define ASM_CONST(x) __ASM_CONST(x) 508 #define ASM_CONST(x) __ASM_CONST(x)
509
510#ifdef CONFIG_PPC64
511#define DATAL ".llong"
512#else
513#define DATAL ".long"
514#endif
515
509#endif /* __ASSEMBLY__ */ 516#endif /* __ASSEMBLY__ */
510 517
511#endif /* _ASM_POWERPC_PPC_ASM_H */ 518#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index eee954a001fd..1dc4bf7b52b3 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -70,7 +70,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
70#define PLATFORM_LPAR 0x0001 70#define PLATFORM_LPAR 0x0001
71#define PLATFORM_POWERMAC 0x0400 71#define PLATFORM_POWERMAC 0x0400
72#define PLATFORM_MAPLE 0x0500 72#define PLATFORM_MAPLE 0x0500
73#define PLATFORM_BPA 0x1000 73#define PLATFORM_CELL 0x1000
74 74
75/* Compatibility with drivers coming from PPC32 world */ 75/* Compatibility with drivers coming from PPC32 world */
76#define _machine (systemcfg->platform) 76#define _machine (systemcfg->platform)
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
new file mode 100644
index 000000000000..1f7ecdb0b6ce
--- /dev/null
+++ b/include/asm-powerpc/ptrace.h
@@ -0,0 +1,248 @@
1#ifndef _ASM_POWERPC_PTRACE_H
2#define _ASM_POWERPC_PTRACE_H
3
4/*
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This struct defines the way the registers are stored on the
8 * kernel stack during a system call or other kernel entry.
9 *
10 * this should only contain volatile regs
11 * since we can keep non-volatile in the thread_struct
12 * should set this up when only volatiles are saved
13 * by intr code.
14 *
15 * Since this is going on the stack, *CARE MUST BE TAKEN* to insure
16 * that the overall structure is a multiple of 16 bytes in length.
17 *
18 * Note that the offsets of the fields in this struct correspond with
19 * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
20 *
21 * This program is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU General Public License
23 * as published by the Free Software Foundation; either version
24 * 2 of the License, or (at your option) any later version.
25 */
26
27#ifndef __ASSEMBLY__
28
29struct pt_regs {
30 unsigned long gpr[32];
31 unsigned long nip;
32 unsigned long msr;
33 unsigned long orig_gpr3; /* Used for restarting system calls */
34 unsigned long ctr;
35 unsigned long link;
36 unsigned long xer;
37 unsigned long ccr;
38#ifdef __powerpc64__
39 unsigned long softe; /* Soft enabled/disabled */
40#else
41 unsigned long mq; /* 601 only (not used at present) */
42 /* Used on APUS to hold IPL value. */
43#endif
44 unsigned long trap; /* Reason for being here */
45 /* N.B. for critical exceptions on 4xx, the dar and dsisr
46 fields are overloaded to hold srr0 and srr1. */
47 unsigned long dar; /* Fault registers */
48 unsigned long dsisr; /* on 4xx/Book-E used for ESR */
49 unsigned long result; /* Result of a system call */
50};
51
52#endif /* __ASSEMBLY__ */
53
54#ifdef __KERNEL__
55
56#ifdef __powerpc64__
57
58#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
59
60/* Size of dummy stack frame allocated when calling signal handler. */
61#define __SIGNAL_FRAMESIZE 128
62#define __SIGNAL_FRAMESIZE32 64
63
64#else /* __powerpc64__ */
65
66#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
67
68/* Size of stack frame allocated when calling signal handler. */
69#define __SIGNAL_FRAMESIZE 64
70
71#endif /* __powerpc64__ */
72
73#ifndef __ASSEMBLY__
74
75#define instruction_pointer(regs) ((regs)->nip)
76#ifdef CONFIG_SMP
77extern unsigned long profile_pc(struct pt_regs *regs);
78#else
79#define profile_pc(regs) instruction_pointer(regs)
80#endif
81
82#ifdef __powerpc64__
83#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
84#else
85#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
86#endif
87
88#define force_successful_syscall_return() \
89 do { \
90 current_thread_info()->syscall_noerror = 1; \
91 } while(0)
92
93/*
94 * We use the least-significant bit of the trap field to indicate
95 * whether we have saved the full set of registers, or only a
96 * partial set. A 1 there means the partial set.
97 * On 4xx we use the next bit to indicate whether the exception
98 * is a critical exception (1 means it is).
99 */
100#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
101#ifndef __powerpc64__
102#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) == 0)
103#endif /* ! __powerpc64__ */
104#define TRAP(regs) ((regs)->trap & ~0xF)
105#ifdef __powerpc64__
106#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
107#else
108#define CHECK_FULL_REGS(regs) \
109do { \
110 if ((regs)->trap & 1) \
111 printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
112} while (0)
113#endif /* __powerpc64__ */
114
115#endif /* __ASSEMBLY__ */
116
117#endif /* __KERNEL__ */
118
119/*
120 * Offsets used by 'ptrace' system call interface.
121 * These can't be changed without breaking binary compatibility
122 * with MkLinux, etc.
123 */
124#define PT_R0 0
125#define PT_R1 1
126#define PT_R2 2
127#define PT_R3 3
128#define PT_R4 4
129#define PT_R5 5
130#define PT_R6 6
131#define PT_R7 7
132#define PT_R8 8
133#define PT_R9 9
134#define PT_R10 10
135#define PT_R11 11
136#define PT_R12 12
137#define PT_R13 13
138#define PT_R14 14
139#define PT_R15 15
140#define PT_R16 16
141#define PT_R17 17
142#define PT_R18 18
143#define PT_R19 19
144#define PT_R20 20
145#define PT_R21 21
146#define PT_R22 22
147#define PT_R23 23
148#define PT_R24 24
149#define PT_R25 25
150#define PT_R26 26
151#define PT_R27 27
152#define PT_R28 28
153#define PT_R29 29
154#define PT_R30 30
155#define PT_R31 31
156
157#define PT_NIP 32
158#define PT_MSR 33
159#ifdef __KERNEL__
160#define PT_ORIG_R3 34
161#endif
162#define PT_CTR 35
163#define PT_LNK 36
164#define PT_XER 37
165#define PT_CCR 38
166#ifndef __powerpc64__
167#define PT_MQ 39
168#else
169#define PT_SOFTE 39
170#define PT_TRAP 40
171#define PT_DAR 41
172#define PT_DSISR 42
173#define PT_RESULT 43
174#endif
175
176#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
177
178#ifndef __powerpc64__
179
180#define PT_FPR31 (PT_FPR0 + 2*31)
181#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
182
183#else /* __powerpc64__ */
184
185#define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */
186
187#ifdef __KERNEL__
188#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
189#endif
190
191#define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */
192#define PT_VSCR (PT_VR0 + 32*2 + 1)
193#define PT_VRSAVE (PT_VR0 + 33*2)
194
195#ifdef __KERNEL__
196#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
197#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
198#define PT_VRSAVE_32 (PT_VR0 + 33*4)
199#endif
200
201#endif /* __powerpc64__ */
202
203/*
204 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 205 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
206 * corresponding vector registers. Quadword 32 contains the vscr as the
207 * last word (offset 12) within that quadword. Quadword 33 contains the
208 * vrsave as the first word (offset 0) within the quadword.
209 *
210 * This definition of the VMX state is compatible with the current PPC32
211 * ptrace interface. This allows signal handling and ptrace to use the same
212 * structures. This also simplifies the implementation of a bi-arch
 213 * (combined 32- and 64-bit) gdb.
214 */
215#define PTRACE_GETVRREGS 18
216#define PTRACE_SETVRREGS 19
217
218#ifndef __powerpc64__
219/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
220 * spefscr, in one go */
221#define PTRACE_GETEVRREGS 20
222#define PTRACE_SETEVRREGS 21
223#endif /* __powerpc64__ */
224
225/*
226 * Get or set a debug register. The first 16 are DABR registers and the
227 * second 16 are IABR registers.
228 */
229#define PTRACE_GET_DEBUGREG 25
230#define PTRACE_SET_DEBUGREG 26
231
232#ifdef __powerpc64__
233/* Additional PTRACE requests implemented on PowerPC. */
234#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
235#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
236#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
237#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
238
239/* Calls to trace a 64bit program from a 32bit program */
240#define PPC_PTRACE_PEEKTEXT_3264 0x95
241#define PPC_PTRACE_PEEKDATA_3264 0x94
242#define PPC_PTRACE_POKETEXT_3264 0x93
243#define PPC_PTRACE_POKEDATA_3264 0x92
244#define PPC_PTRACE_PEEKUSR_3264 0x91
245#define PPC_PTRACE_POKEUSR_3264 0x90
246#endif /* __powerpc64__ */
247
248#endif /* _ASM_POWERPC_PTRACE_H */
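
[PTRACE_GETVRREGS transfers the 34-quadword block described above (32 vector registers, then vscr, then vrsave). A hedged user-space sketch of fetching it from a stopped tracee; the request number 18 is PTRACE_GETVRREGS from this header, and the function name is illustrative:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <stdint.h>

	/* Sketch: read a tracee's AltiVec state into a 34 x 16-byte buffer. */
	long example_get_vrregs(pid_t pid, uint8_t vrregs[34][16])
	{
		return ptrace(18 /* PTRACE_GETVRREGS */, pid, 0, vrregs);
	}
]
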
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 2c050332471d..d1bb611ea626 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -149,28 +149,11 @@ struct rtas_error_log {
149 unsigned char buffer[1]; 149 unsigned char buffer[1];
150}; 150};
151 151
152struct flash_block { 152/*
153 char *data; 153 * This can be set by the rtas_flash module so that it can get called
154 unsigned long length; 154 * as the absolutely last thing before the kernel terminates.
155};
156
157/* This struct is very similar but not identical to
158 * that needed by the rtas flash update.
159 * All we need to do for rtas is rewrite num_blocks
160 * into a version/length and translate the pointers
161 * to absolute.
162 */ 155 */
163#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block)) 156extern void (*rtas_flash_term_hook)(int);
164struct flash_block_list {
165 unsigned long num_blocks;
166 struct flash_block_list *next;
167 struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
168};
169struct flash_block_list_header { /* just the header of flash_block_list */
170 unsigned long num_blocks;
171 struct flash_block_list *next;
172};
173extern struct flash_block_list_header rtas_firmware_flash_list;
174 157
175extern struct rtas_t rtas; 158extern struct rtas_t rtas;
176 159
diff --git a/include/asm-powerpc/sigcontext.h b/include/asm-powerpc/sigcontext.h
new file mode 100644
index 000000000000..165d630e1cf3
--- /dev/null
+++ b/include/asm-powerpc/sigcontext.h
@@ -0,0 +1,52 @@
1#ifndef _ASM_POWERPC_SIGCONTEXT_H
2#define _ASM_POWERPC_SIGCONTEXT_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10#include <linux/compiler.h>
11#include <asm/ptrace.h>
12#ifdef __powerpc64__
13#include <asm/elf.h>
14#endif
15
16struct sigcontext {
17 unsigned long _unused[4];
18 int signal;
19#ifdef __powerpc64__
20 int _pad0;
21#endif
22 unsigned long handler;
23 unsigned long oldmask;
24 struct pt_regs __user *regs;
25#ifdef __powerpc64__
26 elf_gregset_t gp_regs;
27 elf_fpregset_t fp_regs;
28/*
29 * To maintain compatibility with current implementations the sigcontext is
30 * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
31 * followed by an unstructured (vmx_reserve) field of 69 doublewords. This
32 * allows the array of vector registers to be quadword aligned independent of
33 * the alignment of the containing sigcontext or ucontext. It is the
34 * responsibility of the code setting the sigcontext to set this pointer to
35 * either NULL (if this processor does not support the VMX feature) or the
36 * address of the first quadword within the allocated (vmx_reserve) area.
37 *
38 * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
39 * an array of 34 quadword entries (elf_vrregset_t). The entries with
40 * indexes 0-31 contain the corresponding vector registers. The entry with
41 * index 32 contains the vscr as the last word (offset 12) within the
42 * quadword. This allows the vscr to be stored as either a quadword (since
43 * it must be copied via a vector register to/from storage) or as a word.
44 * The entry with index 33 contains the vrsave as the first word (offset 0)
45 * within the quadword.
46 */
47 elf_vrreg_t __user *v_regs;
48 long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1];
49#endif
50};
51
52#endif /* _ASM_POWERPC_SIGCONTEXT_H */
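
[On 64-bit, v_regs is the quadword-aligned pointer into vmx_reserve described in the comment; it is NULL when the processor has no VMX. A hedged sketch of the user-space view of that layout (the cast away from __user and the example_ name are for illustration only):

	/* Sketch (user-space view of the 64-bit layout): entry 33 of the
	 * v_regs array holds vrsave in its first word, per the comment. */
	static unsigned int example_read_vrsave(const struct sigcontext *sc)
	{
		if (!sc->v_regs)
			return 0;	/* no VMX state saved */
		return *(const unsigned int *)&sc->v_regs[33];
	}
]
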
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
new file mode 100644
index 000000000000..8bcdd0faefea
--- /dev/null
+++ b/include/asm-powerpc/smp.h
@@ -0,0 +1,119 @@
1/*
2 * smp.h: PowerPC-specific SMP code.
3 *
4 * Original was a copy of sparc smp.h. Now heavily modified
5 * for PPC.
6 *
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#ifndef _ASM_POWERPC_SMP_H
17#define _ASM_POWERPC_SMP_H
18#ifdef __KERNEL__
19
20#include <linux/config.h>
21#include <linux/threads.h>
22#include <linux/cpumask.h>
23#include <linux/kernel.h>
24
25#ifndef __ASSEMBLY__
26
27#ifdef CONFIG_PPC64
28#include <asm/paca.h>
29#endif
30
31extern int boot_cpuid;
32extern int boot_cpuid_phys;
33
34extern void cpu_die(void);
35
36#ifdef CONFIG_SMP
37
38extern void smp_send_debugger_break(int cpu);
39struct pt_regs;
40extern void smp_message_recv(int, struct pt_regs *);
41
42#ifdef CONFIG_HOTPLUG_CPU
43extern void fixup_irqs(cpumask_t map);
44int generic_cpu_disable(void);
45int generic_cpu_enable(unsigned int cpu);
46void generic_cpu_die(unsigned int cpu);
47void generic_mach_cpu_die(void);
48#endif
49
50#ifdef CONFIG_PPC64
51#define raw_smp_processor_id() (get_paca()->paca_index)
52#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
53#else
54/* 32-bit */
55extern int smp_hw_index[];
56
57#define raw_smp_processor_id() (current_thread_info()->cpu)
58#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
59#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
60#define set_hard_smp_processor_id(cpu, phys)\
61 (smp_hw_index[(cpu)] = (phys))
62#endif
63
64extern cpumask_t cpu_sibling_map[NR_CPUS];
65
66/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
67 *
68 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
69 * in /proc/interrupts will be wrong!!! --Troy */
70#define PPC_MSG_CALL_FUNCTION 0
71#define PPC_MSG_RESCHEDULE 1
72/* This is unused now */
73#if 0
74#define PPC_MSG_MIGRATE_TASK 2
75#endif
76#define PPC_MSG_DEBUGGER_BREAK 3
77
78void smp_init_iSeries(void);
79void smp_init_pSeries(void);
80void smp_init_cell(void);
81void smp_setup_cpu_maps(void);
82
83extern int __cpu_disable(void);
84extern void __cpu_die(unsigned int cpu);
85
86#else
87/* for UP */
88#define smp_setup_cpu_maps()
89#define smp_release_cpus()
90
91#endif /* CONFIG_SMP */
92
93#ifdef CONFIG_PPC64
94#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
95#define set_hard_smp_processor_id(CPU, VAL) \
96 do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
97#else
98/* 32-bit */
99#ifndef CONFIG_SMP
100#define get_hard_smp_processor_id(cpu) boot_cpuid_phys
101#define set_hard_smp_processor_id(cpu, phys)
102#endif
103#endif
104
105extern int smt_enabled_at_boot;
106
107extern int smp_mpic_probe(void);
108extern void smp_mpic_setup_cpu(int cpu);
109extern void smp_generic_kick_cpu(int nr);
110
111extern void smp_generic_give_timebase(void);
112extern void smp_generic_take_timebase(void);
113
114extern struct smp_ops_t *smp_ops;
115
116#endif /* __ASSEMBLY__ */
117
118#endif /* __KERNEL__ */
119#endif /* _ASM_POWERPC_SMP_H */
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
new file mode 100644
index 000000000000..1c95ab99deb3
--- /dev/null
+++ b/include/asm-powerpc/sparsemem.h
@@ -0,0 +1,16 @@
1#ifndef _ASM_POWERPC_SPARSEMEM_H
2#define _ASM_POWERPC_SPARSEMEM_H 1
3
4#ifdef CONFIG_SPARSEMEM
5/*
6 * SECTION_SIZE_BITS 2^N: how big each section will be
7 * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
8 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
9 */
10#define SECTION_SIZE_BITS 24
11#define MAX_PHYSADDR_BITS 38
12#define MAX_PHYSMEM_BITS 36
13
14#endif /* CONFIG_SPARSEMEM */
15
16#endif /* _ASM_POWERPC_SPARSEMEM_H */
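
[The three constants above fix the sparse-memory geometry: sections of 2^24 bytes (16 MB) across a 2^38-byte physical address space, with at most 2^36 bytes (64 GB) of memory, i.e. 2^(36-24) = 4096 sections. A sketch of the derived values (the EXAMPLE_ macro names are illustrative):

	/* Sketch: geometry derived from the constants above. */
	#define EXAMPLE_SECTION_SIZE  (1UL << SECTION_SIZE_BITS)	/* 16 MB */
	#define EXAMPLE_MAX_SECTIONS  \
		(1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS))		/* 4096  */
]
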
diff --git a/include/asm-powerpc/stat.h b/include/asm-powerpc/stat.h
new file mode 100644
index 000000000000..e4edc510b530
--- /dev/null
+++ b/include/asm-powerpc/stat.h
@@ -0,0 +1,81 @@
1#ifndef _ASM_POWERPC_STAT_H
2#define _ASM_POWERPC_STAT_H
3/*
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/types.h>
10
11#define STAT_HAVE_NSEC 1
12
13#ifndef __powerpc64__
14struct __old_kernel_stat {
15 unsigned short st_dev;
16 unsigned short st_ino;
17 unsigned short st_mode;
18 unsigned short st_nlink;
19 unsigned short st_uid;
20 unsigned short st_gid;
21 unsigned short st_rdev;
22 unsigned long st_size;
23 unsigned long st_atime;
24 unsigned long st_mtime;
25 unsigned long st_ctime;
26};
27#endif /* !__powerpc64__ */
28
29struct stat {
30 unsigned long st_dev;
31 ino_t st_ino;
32#ifdef __powerpc64__
33 nlink_t st_nlink;
34 mode_t st_mode;
35#else
36 mode_t st_mode;
37 nlink_t st_nlink;
38#endif
39 uid_t st_uid;
40 gid_t st_gid;
41 unsigned long st_rdev;
42 off_t st_size;
43 unsigned long st_blksize;
44 unsigned long st_blocks;
45 unsigned long st_atime;
46 unsigned long st_atime_nsec;
47 unsigned long st_mtime;
48 unsigned long st_mtime_nsec;
49 unsigned long st_ctime;
50 unsigned long st_ctime_nsec;
51 unsigned long __unused4;
52 unsigned long __unused5;
53#ifdef __powerpc64__
54 unsigned long __unused6;
55#endif
56};
57
58/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
59struct stat64 {
60 unsigned long long st_dev; /* Device. */
61 unsigned long long st_ino; /* File serial number. */
62 unsigned int st_mode; /* File mode. */
63 unsigned int st_nlink; /* Link count. */
64 unsigned int st_uid; /* User ID of the file's owner. */
65 unsigned int st_gid; /* Group ID of the file's group. */
66 unsigned long long st_rdev; /* Device number, if device. */
67 unsigned short __pad2;
68 long long st_size; /* Size of file, in bytes. */
69 int st_blksize; /* Optimal block size for I/O. */
 70 long long st_blocks; /* Number of 512-byte blocks allocated. */
71 int st_atime; /* Time of last access. */
72 unsigned int st_atime_nsec;
73 int st_mtime; /* Time of last modification. */
74 unsigned int st_mtime_nsec;
75 int st_ctime; /* Time of last status change. */
76 unsigned int st_ctime_nsec;
77 unsigned int __unused4;
78 unsigned int __unused5;
79};
80
81#endif /* _ASM_POWERPC_STAT_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 5b2ecbc47907..b5da0b851e02 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -359,5 +359,53 @@ extern void reloc_got2(unsigned long);
359 359
360#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) 360#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
361 361
362static inline void create_instruction(unsigned long addr, unsigned int instr)
363{
364 unsigned int *p;
365 p = (unsigned int *)addr;
366 *p = instr;
367 asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
368}
369
370/* Flags for create_branch:
371 * "b" == create_branch(addr, target, 0);
372 * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
373 * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
374 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
375 */
376#define BRANCH_SET_LINK 0x1
377#define BRANCH_ABSOLUTE 0x2
378
379static inline void create_branch(unsigned long addr,
380 unsigned long target, int flags)
381{
382 unsigned int instruction;
383
384 if (! (flags & BRANCH_ABSOLUTE))
385 target = target - addr;
386
387 /* Mask out the flags and target, so they don't step on each other. */
388 instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
389
390 create_instruction(addr, instruction);
391}
392
393static inline void create_function_call(unsigned long addr, void * func)
394{
395 unsigned long func_addr;
396
397#ifdef CONFIG_PPC64
398 /*
399 * On PPC64 the function pointer actually points to the function's
400 * descriptor. The first entry in the descriptor is the address
401 * of the function text.
402 */
403 func_addr = *(unsigned long *)func;
404#else
405 func_addr = (unsigned long)func;
406#endif
407 create_branch(addr, func_addr, BRANCH_SET_LINK);
408}
409
362#endif /* __KERNEL__ */ 410#endif /* __KERNEL__ */
363#endif /* _ASM_POWERPC_SYSTEM_H */ 411#endif /* _ASM_POWERPC_SYSTEM_H */
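
[The flags comment maps directly onto the branch encodings: create_branch() builds an I-form branch (opcode 0x48000000) with AA and LK taken from the flags and the displacement or absolute address in the low 26 bits, and create_instruction() flushes the caches so the patched instruction becomes visible. A hedged usage sketch; the addresses and example_ names are illustrative, real callers obtain the patch site from symbols:

	/* Sketch: write a "bl replacement" at patch_addr (handles the
	 * PPC64 function-descriptor indirection internally). */
	static void example_patch_call(unsigned long patch_addr, void *replacement)
	{
		create_function_call(patch_addr, replacement);
	}

	/* Sketch: write an unconditional relative branch ("b target"). */
	static void example_patch_branch(unsigned long patch_addr,
					 unsigned long target)
	{
		create_branch(patch_addr, target, 0);
	}
]
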
diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h
index c5b8e5358f83..7f80a019b6a0 100644
--- a/include/asm-powerpc/termios.h
+++ b/include/asm-powerpc/termios.h
@@ -94,142 +94,9 @@ struct termio {
94#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025" 94#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
95#endif 95#endif
96 96
97#define FIOCLEX _IO('f', 1)
98#define FIONCLEX _IO('f', 2)
99#define FIOASYNC _IOW('f', 125, int)
100#define FIONBIO _IOW('f', 126, int)
101#define FIONREAD _IOR('f', 127, int)
102#define TIOCINQ FIONREAD
103
104#define TIOCGETP _IOR('t', 8, struct sgttyb)
105#define TIOCSETP _IOW('t', 9, struct sgttyb)
106#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
107
108#define TIOCSETC _IOW('t', 17, struct tchars)
109#define TIOCGETC _IOR('t', 18, struct tchars)
110#define TCGETS _IOR('t', 19, struct termios)
111#define TCSETS _IOW('t', 20, struct termios)
112#define TCSETSW _IOW('t', 21, struct termios)
113#define TCSETSF _IOW('t', 22, struct termios)
114
115#define TCGETA _IOR('t', 23, struct termio)
116#define TCSETA _IOW('t', 24, struct termio)
117#define TCSETAW _IOW('t', 25, struct termio)
118#define TCSETAF _IOW('t', 28, struct termio)
119
120#define TCSBRK _IO('t', 29)
121#define TCXONC _IO('t', 30)
122#define TCFLSH _IO('t', 31)
123
124#define TIOCSWINSZ _IOW('t', 103, struct winsize)
125#define TIOCGWINSZ _IOR('t', 104, struct winsize)
126#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
127#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
128#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
129
130#define TIOCGLTC _IOR('t', 116, struct ltchars)
131#define TIOCSLTC _IOW('t', 117, struct ltchars)
132#define TIOCSPGRP _IOW('t', 118, int)
133#define TIOCGPGRP _IOR('t', 119, int)
134
135#define TIOCEXCL 0x540C
136#define TIOCNXCL 0x540D
137#define TIOCSCTTY 0x540E
138
139#define TIOCSTI 0x5412
140#define TIOCMGET 0x5415
141#define TIOCMBIS 0x5416
142#define TIOCMBIC 0x5417
143#define TIOCMSET 0x5418
144#define TIOCGSOFTCAR 0x5419
145#define TIOCSSOFTCAR 0x541A
146#define TIOCLINUX 0x541C
147#define TIOCCONS 0x541D
148#define TIOCGSERIAL 0x541E
149#define TIOCSSERIAL 0x541F
150#define TIOCPKT 0x5420
151
152#define TIOCNOTTY 0x5422
153#define TIOCSETD 0x5423
154#define TIOCGETD 0x5424
155#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
156
157#define TIOCSERCONFIG 0x5453
158#define TIOCSERGWILD 0x5454
159#define TIOCSERSWILD 0x5455
160#define TIOCGLCKTRMIOS 0x5456
161#define TIOCSLCKTRMIOS 0x5457
162#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
163#define TIOCSERGETLSR 0x5459 /* Get line status register */
164#define TIOCSERGETMULTI 0x545A /* Get multiport config */
165#define TIOCSERSETMULTI 0x545B /* Set multiport config */
166
167#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
168#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
169
170/* Used for packet mode */
171#define TIOCPKT_DATA 0
172#define TIOCPKT_FLUSHREAD 1
173#define TIOCPKT_FLUSHWRITE 2
174#define TIOCPKT_STOP 4
175#define TIOCPKT_START 8
176#define TIOCPKT_NOSTOP 16
177#define TIOCPKT_DOSTOP 32
178
179/* modem lines */
180#define TIOCM_LE 0x001
181#define TIOCM_DTR 0x002
182#define TIOCM_RTS 0x004
183#define TIOCM_ST 0x008
184#define TIOCM_SR 0x010
185#define TIOCM_CTS 0x020
186#define TIOCM_CAR 0x040
187#define TIOCM_RNG 0x080
188#define TIOCM_DSR 0x100
189#define TIOCM_CD TIOCM_CAR
190#define TIOCM_RI TIOCM_RNG
191#define TIOCM_OUT1 0x2000
192#define TIOCM_OUT2 0x4000
193#define TIOCM_LOOP 0x8000
194
195/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
196#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
197
198#ifdef __KERNEL__ 97#ifdef __KERNEL__
199 98
200/* 99#include <asm-generic/termios.h>
201 * Translate a "termio" structure into a "termios". Ugh.
202 */
203#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
204 unsigned short __tmp; \
205 get_user(__tmp,&(termio)->x); \
206 (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
207}
208
209#define user_termio_to_kernel_termios(termios, termio) \
210({ \
211 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
212 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
213 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
214 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
215 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
216})
217
218/*
219 * Translate a "termios" structure into a "termio". Ugh.
220 */
221#define kernel_termios_to_user_termio(termio, termios) \
222({ \
223 put_user((termios)->c_iflag, &(termio)->c_iflag); \
224 put_user((termios)->c_oflag, &(termio)->c_oflag); \
225 put_user((termios)->c_cflag, &(termio)->c_cflag); \
226 put_user((termios)->c_lflag, &(termio)->c_lflag); \
227 put_user((termios)->c_line, &(termio)->c_line); \
228 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
229})
230
231#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
232#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
233 100
234#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
235 102
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index 410e795f7d43..d9b86a17271b 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -21,7 +21,7 @@
21#include <asm/processor.h> 21#include <asm/processor.h>
22#ifdef CONFIG_PPC64 22#ifdef CONFIG_PPC64
23#include <asm/paca.h> 23#include <asm/paca.h>
24#include <asm/iSeries/HvCall.h> 24#include <asm/iseries/hv_call.h>
25#endif 25#endif
26 26
27/* time.c */ 27/* time.c */
diff --git a/include/asm-powerpc/tlb.h b/include/asm-powerpc/tlb.h
new file mode 100644
index 000000000000..56659f121779
--- /dev/null
+++ b/include/asm-powerpc/tlb.h
@@ -0,0 +1,70 @@
1/*
2 * TLB shootdown specifics for powerpc
3 *
4 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
5 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#ifndef _ASM_POWERPC_TLB_H
13#define _ASM_POWERPC_TLB_H
14
15#include <linux/config.h>
16#ifndef __powerpc64__
17#include <asm/pgtable.h>
18#endif
19#include <asm/pgalloc.h>
20#include <asm/tlbflush.h>
21#ifndef __powerpc64__
22#include <asm/page.h>
23#include <asm/mmu.h>
24#endif
25
26struct mmu_gather;
27
28#define tlb_start_vma(tlb, vma) do { } while (0)
29#define tlb_end_vma(tlb, vma) do { } while (0)
30
31#if !defined(CONFIG_PPC_STD_MMU)
32
33#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
34
35#elif defined(__powerpc64__)
36
37extern void pte_free_finish(void);
38
39static inline void tlb_flush(struct mmu_gather *tlb)
40{
41 flush_tlb_pending();
42 pte_free_finish();
43}
44
45#else
46
47extern void tlb_flush(struct mmu_gather *tlb);
48
49#endif
50
51/* Get the generic bits... */
52#include <asm-generic/tlb.h>
53
54#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
55
56#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
57
58#else
59extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
60 unsigned long address);
61
62static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
63 unsigned long address)
64{
65 if (pte_val(*ptep) & _PAGE_HASHPTE)
66 flush_hash_entry(tlb->mm, ptep, address);
67}
68
69#endif
 70#endif /* _ASM_POWERPC_TLB_H */
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
new file mode 100644
index 000000000000..ca3655672bbc
--- /dev/null
+++ b/include/asm-powerpc/tlbflush.h
@@ -0,0 +1,146 @@
1#ifndef _ASM_POWERPC_TLBFLUSH_H
2#define _ASM_POWERPC_TLBFLUSH_H
3/*
4 * TLB flushing:
5 *
6 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
7 * - flush_tlb_page(vma, vmaddr) flushes one page
8 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
9 * - flush_tlb_range(vma, start, end) flushes a range of pages
10 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
11 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifdef __KERNEL__
19
20#include <linux/config.h>
21
22struct mm_struct;
23
24#ifdef CONFIG_PPC64
25
26#include <linux/percpu.h>
27#include <asm/page.h>
28
29#define PPC64_TLB_BATCH_NR 192
30
31struct ppc64_tlb_batch {
32 unsigned long index;
33 struct mm_struct *mm;
34 pte_t pte[PPC64_TLB_BATCH_NR];
35 unsigned long vaddr[PPC64_TLB_BATCH_NR];
36 unsigned int large;
37};
38DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
39
40extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
41
42static inline void flush_tlb_pending(void)
43{
44 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
45
46 if (batch->index)
47 __flush_tlb_pending(batch);
48 put_cpu_var(ppc64_tlb_batch);
49}
50
51extern void flush_hash_page(unsigned long va, pte_t pte, int local);
52void flush_hash_range(unsigned long number, int local);
53
54#else /* CONFIG_PPC64 */
55
56#include <linux/mm.h>
57
58extern void _tlbie(unsigned long address);
59extern void _tlbia(void);
60
61/*
62 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
63 * flush_tlb_kernel_range are best implemented as tlbia vs
64 * specific tlbie's
65 */
66
67#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
68#define flush_tlb_pending() asm volatile ("tlbia; sync" : : : "memory")
69#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
70#define flush_tlb_pending() _tlbia()
71#endif
72
73/*
74 * This gets called at the end of handling a page fault, when
75 * the kernel has put a new PTE into the page table for the process.
76 * We use it to ensure coherency between the i-cache and d-cache
77 * for the page which has just been mapped in.
78 * On machines which use an MMU hash table, we use this to put a
79 * corresponding HPTE into the hash table ahead of time, instead of
80 * waiting for the inevitable extra hash-table miss exception.
81 */
82extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
83
84#endif /* CONFIG_PPC64 */
85
86#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
87 defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
88
89static inline void flush_tlb_mm(struct mm_struct *mm)
90{
91 flush_tlb_pending();
92}
93
94static inline void flush_tlb_page(struct vm_area_struct *vma,
95 unsigned long vmaddr)
96{
97#ifdef CONFIG_PPC64
98 flush_tlb_pending();
99#else
100 _tlbie(vmaddr);
101#endif
102}
103
104static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
105 unsigned long vmaddr)
106{
107#ifndef CONFIG_PPC64
108 _tlbie(vmaddr);
109#endif
110}
111
112static inline void flush_tlb_range(struct vm_area_struct *vma,
113 unsigned long start, unsigned long end)
114{
115 flush_tlb_pending();
116}
117
118static inline void flush_tlb_kernel_range(unsigned long start,
119 unsigned long end)
120{
121 flush_tlb_pending();
122}
123
124#else /* 6xx, 7xx, 7xxx cpus */
125
126extern void flush_tlb_mm(struct mm_struct *mm);
127extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
128extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
129extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
130 unsigned long end);
131extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
132
133#endif
134
135/*
136 * This is called in munmap when we have freed up some page-table
137 * pages. We don't need to do anything here, there's nothing special
138 * about our page-table pages. -- paulus
139 */
140static inline void flush_tlb_pgtables(struct mm_struct *mm,
141 unsigned long start, unsigned long end)
142{
143}
144
145#endif /*__KERNEL__ */
146#endif /* _ASM_POWERPC_TLBFLUSH_H */
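
Callers never touch the per-cpu ppc64_tlb_batch directly; they pick whichever flush_tlb_*() entry point matches the scope of their page-table change, and this header decides whether that means draining the pending batch, an inline tlbie, or an out-of-line routine. A minimal, hypothetical example of the single-page case (the function name and the surrounding update are illustrative):

	static void example_after_pte_change(struct vm_area_struct *vma,
					     unsigned long address)
	{
		/* The PTE for 'address' has just been rewritten.  On 64-bit
		 * this drains the pending hash-invalidate batch; on the
		 * software-loaded-TLB variants above it issues _tlbie()
		 * directly; on 6xx/7xx it calls the out-of-line version. */
		flush_tlb_page(vma, address);
	}
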
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 000000000000..33af730f0d19
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
1#ifndef _ARCH_POWERPC_UACCESS_H
2#define _ARCH_POWERPC_UACCESS_H
3
4#ifdef __KERNEL__
5#ifndef __ASSEMBLY__
6
7#include <linux/sched.h>
8#include <linux/errno.h>
9#include <asm/processor.h>
10
11#define VERIFY_READ 0
12#define VERIFY_WRITE 1
13
14/*
15 * The fs value determines whether argument validity checking should be
16 * performed or not. If get_fs() == USER_DS, checking is performed; with
17 * get_fs() == KERNEL_DS, checking is bypassed.
18 *
19 * For historical reasons, these macros are grossly misnamed.
20 *
21 * The fs/ds values are now the highest legal address in the "segment".
22 * This simplifies the checking in the routines below.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27#define KERNEL_DS MAKE_MM_SEG(~0UL)
28#ifdef __powerpc64__
29/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
30#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
31#else
32#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
33#endif
34
35#define get_ds() (KERNEL_DS)
36#define get_fs() (current->thread.fs)
37#define set_fs(val) (current->thread.fs = (val))
38
39#define segment_eq(a, b) ((a).seg == (b).seg)
40
41#ifdef __powerpc64__
42/*
43 * This check is sufficient because there is a large enough
44 * gap between user addresses and the kernel addresses
45 */
46#define __access_ok(addr, size, segment) \
47 (((addr) <= (segment).seg) && ((size) <= (segment).seg))
48
49#else
50
51#define __access_ok(addr, size, segment) \
52 (((addr) <= (segment).seg) && \
53 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
54
55#endif
56
57#define access_ok(type, addr, size) \
58 (__chk_user_ptr(addr), \
59 __access_ok((__force unsigned long)(addr), (size), get_fs()))
60
61/*
62 * The exception table consists of pairs of addresses: the first is the
63 * address of an instruction that is allowed to fault, and the second is
64 * the address at which the program should continue. No registers are
65 * modified, so it is entirely up to the continuation code to figure out
66 * what to do.
67 *
68 * All the routines below use bits of fixup code that are out of line
69 * with the main instruction path. This means when everything is well,
70 * we don't even have to jump over them. Further, they do not intrude
71 * on our cache or tlb entries.
72 */
73
74struct exception_table_entry {
75 unsigned long insn;
76 unsigned long fixup;
77};
78
79/*
80 * These are the main single-value transfer routines. They automatically
81 * use the right size if we just have the right pointer type.
82 *
83 * This gets kind of ugly. We want to return _two_ values in "get_user()"
84 * and yet we don't want to do any pointers, because that is too much
85 * of a performance impact. Thus we have a few rather ugly macros here,
86 * and hide all the ugliness from the user.
87 *
88 * The "__xxx" versions of the user access functions are versions that
89 * do not verify the address space, that must have been done previously
90 * with a separate "access_ok()" call (this is used when we do multiple
91 * accesses to the same area of user memory).
92 *
93 * As we use the same address space for kernel and user data on the
94 * PowerPC, we can just do these as direct assignments. (Of course, the
95 * exception handling means that it's no longer "just"...)
96 *
97 * The "user64" versions of the user access functions are versions that
98 * allow access of 64-bit data. The "get_user" functions do not
99 * properly handle 64-bit data because the value gets downcast to a long.
100 * The "put_user" functions already handle 64-bit data properly, but we add
101 * "user64" versions for completeness.
102 */
103#define get_user(x, ptr) \
104 __get_user_check((x), (ptr), sizeof(*(ptr)))
105#define put_user(x, ptr) \
106 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
107
108#define __get_user(x, ptr) \
109 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
110#define __put_user(x, ptr) \
111 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
112#ifndef __powerpc64__
113#define __get_user64(x, ptr) \
114 __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
115#define __put_user64(x, ptr) __put_user(x, ptr)
116#endif
117
118#define __get_user_unaligned __get_user
119#define __put_user_unaligned __put_user
120
121extern long __put_user_bad(void);
122
123#ifdef __powerpc64__
124#define __EX_TABLE_ALIGN "3"
125#define __EX_TABLE_TYPE "llong"
126#else
127#define __EX_TABLE_ALIGN "2"
128#define __EX_TABLE_TYPE "long"
129#endif
130
131/*
132 * We don't tell gcc that we are accessing memory, but this is OK
133 * because we do not write to any memory gcc knows about, so there
134 * are no aliasing issues.
135 */
136#define __put_user_asm(x, addr, err, op) \
137 __asm__ __volatile__( \
138 "1: " op " %1,0(%2) # put_user\n" \
139 "2:\n" \
140 ".section .fixup,\"ax\"\n" \
141 "3: li %0,%3\n" \
142 " b 2b\n" \
143 ".previous\n" \
144 ".section __ex_table,\"a\"\n" \
145 " .align " __EX_TABLE_ALIGN "\n" \
146 " ."__EX_TABLE_TYPE" 1b,3b\n" \
147 ".previous" \
148 : "=r" (err) \
149 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
150
151#ifdef __powerpc64__
152#define __put_user_asm2(x, ptr, retval) \
153 __put_user_asm(x, ptr, retval, "std")
154#else /* __powerpc64__ */
155#define __put_user_asm2(x, addr, err) \
156 __asm__ __volatile__( \
157 "1: stw %1,0(%2)\n" \
158 "2: stw %1+1,4(%2)\n" \
159 "3:\n" \
160 ".section .fixup,\"ax\"\n" \
161 "4: li %0,%3\n" \
162 " b 3b\n" \
163 ".previous\n" \
164 ".section __ex_table,\"a\"\n" \
165 " .align " __EX_TABLE_ALIGN "\n" \
166 " ." __EX_TABLE_TYPE " 1b,4b\n" \
167 " ." __EX_TABLE_TYPE " 2b,4b\n" \
168 ".previous" \
169 : "=r" (err) \
170 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
171#endif /* __powerpc64__ */
172
173#define __put_user_size(x, ptr, size, retval) \
174do { \
175 retval = 0; \
176 switch (size) { \
177 case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
178 case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
179 case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
180 case 8: __put_user_asm2(x, ptr, retval); break; \
181 default: __put_user_bad(); \
182 } \
183} while (0)
184
185#define __put_user_nocheck(x, ptr, size) \
186({ \
187 long __pu_err; \
188 might_sleep(); \
189 __chk_user_ptr(ptr); \
190 __put_user_size((x), (ptr), (size), __pu_err); \
191 __pu_err; \
192})
193
194#define __put_user_check(x, ptr, size) \
195({ \
196 long __pu_err = -EFAULT; \
197 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
198 might_sleep(); \
199 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
200 __put_user_size((x), __pu_addr, (size), __pu_err); \
201 __pu_err; \
202})
203
204extern long __get_user_bad(void);
205
206#define __get_user_asm(x, addr, err, op) \
207 __asm__ __volatile__( \
208 "1: "op" %1,0(%2) # get_user\n" \
209 "2:\n" \
210 ".section .fixup,\"ax\"\n" \
211 "3: li %0,%3\n" \
212 " li %1,0\n" \
213 " b 2b\n" \
214 ".previous\n" \
215 ".section __ex_table,\"a\"\n" \
216 " .align "__EX_TABLE_ALIGN "\n" \
217 " ." __EX_TABLE_TYPE " 1b,3b\n" \
218 ".previous" \
219 : "=r" (err), "=r" (x) \
220 : "b" (addr), "i" (-EFAULT), "0" (err))
221
222#ifdef __powerpc64__
223#define __get_user_asm2(x, addr, err) \
224 __get_user_asm(x, addr, err, "ld")
225#else /* __powerpc64__ */
226#define __get_user_asm2(x, addr, err) \
227 __asm__ __volatile__( \
228 "1: lwz %1,0(%2)\n" \
229 "2: lwz %1+1,4(%2)\n" \
230 "3:\n" \
231 ".section .fixup,\"ax\"\n" \
232 "4: li %0,%3\n" \
233 " li %1,0\n" \
234 " li %1+1,0\n" \
235 " b 3b\n" \
236 ".previous\n" \
237 ".section __ex_table,\"a\"\n" \
238 " .align " __EX_TABLE_ALIGN "\n" \
239 " ." __EX_TABLE_TYPE " 1b,4b\n" \
240 " ." __EX_TABLE_TYPE " 2b,4b\n" \
241 ".previous" \
242 : "=r" (err), "=&r" (x) \
243 : "b" (addr), "i" (-EFAULT), "0" (err))
244#endif /* __powerpc64__ */
245
246#define __get_user_size(x, ptr, size, retval) \
247do { \
248 retval = 0; \
249 __chk_user_ptr(ptr); \
250 if (size > sizeof(x)) \
251 (x) = __get_user_bad(); \
252 switch (size) { \
253 case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
254 case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
255 case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
256 case 8: __get_user_asm2(x, ptr, retval); break; \
257 default: (x) = __get_user_bad(); \
258 } \
259} while (0)
260
261#define __get_user_nocheck(x, ptr, size) \
262({ \
263 long __gu_err; \
264 unsigned long __gu_val; \
265 __chk_user_ptr(ptr); \
266 might_sleep(); \
267 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
268 (x) = (__typeof__(*(ptr)))__gu_val; \
269 __gu_err; \
270})
271
272#ifndef __powerpc64__
273#define __get_user64_nocheck(x, ptr, size) \
274({ \
275 long __gu_err; \
276 long long __gu_val; \
277 __chk_user_ptr(ptr); \
278 might_sleep(); \
279 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
280 (x) = (__typeof__(*(ptr)))__gu_val; \
281 __gu_err; \
282})
283#endif /* __powerpc64__ */
284
285#define __get_user_check(x, ptr, size) \
286({ \
287 long __gu_err = -EFAULT; \
288 unsigned long __gu_val = 0; \
289 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
290 might_sleep(); \
291 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
292 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
293 (x) = (__typeof__(*(ptr)))__gu_val; \
294 __gu_err; \
295})
296
297/* more complex routines */
298
299extern unsigned long __copy_tofrom_user(void __user *to,
300 const void __user *from, unsigned long size);
301
302#ifndef __powerpc64__
303
304extern inline unsigned long copy_from_user(void *to,
305 const void __user *from, unsigned long n)
306{
307 unsigned long over;
308
309 if (access_ok(VERIFY_READ, from, n))
310 return __copy_tofrom_user((__force void __user *)to, from, n);
311 if ((unsigned long)from < TASK_SIZE) {
312 over = (unsigned long)from + n - TASK_SIZE;
313 return __copy_tofrom_user((__force void __user *)to, from,
314 n - over) + over;
315 }
316 return n;
317}
318
319extern inline unsigned long copy_to_user(void __user *to,
320 const void *from, unsigned long n)
321{
322 unsigned long over;
323
324 if (access_ok(VERIFY_WRITE, to, n))
325 return __copy_tofrom_user(to, (__force void __user *)from, n);
326 if ((unsigned long)to < TASK_SIZE) {
327 over = (unsigned long)to + n - TASK_SIZE;
328 return __copy_tofrom_user(to, (__force void __user *)from,
329 n - over) + over;
330 }
331 return n;
332}
333
334#else /* __powerpc64__ */
335
336#define __copy_in_user(to, from, size) \
337 __copy_tofrom_user((to), (from), (size))
338
339extern unsigned long copy_from_user(void *to, const void __user *from,
340 unsigned long n);
341extern unsigned long copy_to_user(void __user *to, const void *from,
342 unsigned long n);
343extern unsigned long copy_in_user(void __user *to, const void __user *from,
344 unsigned long n);
345
346#endif /* __powerpc64__ */
347
348static inline unsigned long __copy_from_user_inatomic(void *to,
349 const void __user *from, unsigned long n)
350{
351 if (__builtin_constant_p(n) && (n <= 8)) {
352 unsigned long ret;
353
354 switch (n) {
355 case 1:
356 __get_user_size(*(u8 *)to, from, 1, ret);
357 break;
358 case 2:
359 __get_user_size(*(u16 *)to, from, 2, ret);
360 break;
361 case 4:
362 __get_user_size(*(u32 *)to, from, 4, ret);
363 break;
364 case 8:
365 __get_user_size(*(u64 *)to, from, 8, ret);
366 break;
367 }
368 if (ret == 0)
369 return 0;
370 }
371 return __copy_tofrom_user((__force void __user *)to, from, n);
372}
373
374static inline unsigned long __copy_to_user_inatomic(void __user *to,
375 const void *from, unsigned long n)
376{
377 if (__builtin_constant_p(n) && (n <= 8)) {
378 unsigned long ret;
379
380 switch (n) {
381 case 1:
382 __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
383 break;
384 case 2:
385 __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
386 break;
387 case 4:
388 __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
389 break;
390 case 8:
391 __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
392 break;
393 }
394 if (ret == 0)
395 return 0;
396 }
397 return __copy_tofrom_user(to, (__force const void __user *)from, n);
398}
399
400static inline unsigned long __copy_from_user(void *to,
401 const void __user *from, unsigned long size)
402{
403 might_sleep();
404 return __copy_from_user_inatomic(to, from, size);
405}
406
407static inline unsigned long __copy_to_user(void __user *to,
408 const void *from, unsigned long size)
409{
410 might_sleep();
411 return __copy_to_user_inatomic(to, from, size);
412}
413
414extern unsigned long __clear_user(void __user *addr, unsigned long size);
415
416static inline unsigned long clear_user(void __user *addr, unsigned long size)
417{
418 might_sleep();
419 if (likely(access_ok(VERIFY_WRITE, addr, size)))
420 return __clear_user(addr, size);
421 if ((unsigned long)addr < TASK_SIZE) {
422 unsigned long over = (unsigned long)addr + size - TASK_SIZE;
423 return __clear_user(addr, size - over) + over;
424 }
425 return size;
426}
427
428extern int __strncpy_from_user(char *dst, const char __user *src, long count);
429
430static inline long strncpy_from_user(char *dst, const char __user *src,
431 long count)
432{
433 might_sleep();
434 if (likely(access_ok(VERIFY_READ, src, 1)))
435 return __strncpy_from_user(dst, src, count);
436 return -EFAULT;
437}
438
439/*
440 * Return the size of a string (including the ending 0)
441 *
442 * Return 0 for error
443 */
444extern int __strnlen_user(const char __user *str, long len, unsigned long top);
445
446/*
447 * Returns the length of the string at str (including the null byte),
448 * or 0 if we hit a page we can't access,
449 * or something > len if we didn't find a null byte.
450 *
451 * The `top' parameter to __strnlen_user is to make sure that
452 * we can never overflow from the user area into kernel space.
453 */
454static inline int strnlen_user(const char __user *str, long len)
455{
456 unsigned long top = current->thread.fs.seg;
457
458 if ((unsigned long)str > top)
459 return 0;
460 return __strnlen_user(str, len, top);
461}
462
463#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
464
465#endif /* __ASSEMBLY__ */
466#endif /* __KERNEL__ */
467
468#endif /* _ARCH_POWERPC_UACCESS_H */
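
The checked get_user()/put_user() macros above return 0 on success and -EFAULT on a bad pointer, while copy_to_user()/copy_from_user() return the number of bytes that could not be copied; either way the caller converts failure into -EFAULT itself. A short hypothetical sketch (names are illustrative, not from this patch; assumes <linux/types.h> and <linux/errno.h>):

	/* Read a u32 from user space, double it, and write it back. */
	static long example_double_u32(u32 __user *uptr)
	{
		u32 val;

		if (get_user(val, uptr))	/* performs access_ok() + fault fixup */
			return -EFAULT;
		val *= 2;
		if (put_user(val, uptr))
			return -EFAULT;
		return 0;
	}

	/* Bulk copy: a non-zero return is the count of bytes NOT copied. */
	static long example_fill_user(void __user *ubuf, const void *kbuf,
				      unsigned long len)
	{
		if (copy_to_user(ubuf, kbuf, len))
			return -EFAULT;
		return 0;
	}
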
diff --git a/include/asm-powerpc/ucontext.h b/include/asm-powerpc/ucontext.h
new file mode 100644
index 000000000000..d9a4ddf0cc86
--- /dev/null
+++ b/include/asm-powerpc/ucontext.h
@@ -0,0 +1,40 @@
1#ifndef _ASM_POWERPC_UCONTEXT_H
2#define _ASM_POWERPC_UCONTEXT_H
3
4#ifdef __powerpc64__
5#include <asm/sigcontext.h>
6#else
7#include <asm/elf.h>
8#endif
9#include <asm/signal.h>
10
11#ifndef __powerpc64__
12struct mcontext {
13 elf_gregset_t mc_gregs;
14 elf_fpregset_t mc_fregs;
15 unsigned long mc_pad[2];
16 elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
17};
18#endif
19
20struct ucontext {
21 unsigned long uc_flags;
22 struct ucontext __user *uc_link;
23 stack_t uc_stack;
24#ifndef __powerpc64__
25 int uc_pad[7];
26 struct mcontext __user *uc_regs;/* points to uc_mcontext field */
27#endif
28 sigset_t uc_sigmask;
29 /* glibc has 1024-bit signal masks, ours are 64-bit */
30#ifdef __powerpc64__
31 sigset_t __unused[15]; /* Allow for uc_sigmask growth */
32 struct sigcontext uc_mcontext; /* last for extensibility */
33#else
34 int uc_maskext[30];
35 int uc_pad2[3];
36 struct mcontext uc_mcontext;
37#endif
38};
39
40#endif /* _ASM_POWERPC_UCONTEXT_H */
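
User space sees this layout through the third argument of an SA_SIGINFO signal handler, although glibc's ucontext_t mirrors rather than exactly matches the kernel structure (note the 1024-bit vs 64-bit sigmask comment above). A small, illustrative user-space program, not part of this patch, that only touches the portable fields:

	#include <signal.h>
	#include <string.h>
	#include <ucontext.h>

	static void *signal_stack_base;	/* written by the handler below */

	static void handler(int sig, siginfo_t *info, void *uc_void)
	{
		ucontext_t *uc = uc_void;

		/* uc_stack corresponds to the uc_stack field defined above */
		signal_stack_base = uc->uc_stack.ss_sp;
		(void)sig;
		(void)info;
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGUSR1, &sa, 0);
		raise(SIGUSR1);
		return 0;
	}
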