author    Tony Luck <tony.luck@intel.com>    2005-11-07 12:05:22 -0500
committer Tony Luck <tony.luck@intel.com>    2005-11-07 12:05:22 -0500
commit    0ad3a96f8ad910ecf87a25ec69ed360b284dee2e (patch)
tree      12d292fd58fc0f7a3eb56c89dfc23569f3ab6c00 /include/asm-ppc64
parent    f79b348856fbaf77e4a0c5cb08a808e5879967a9 (diff)
parent    5b2f7ffcb734d3046144dfbd5ac6d76254a9e522 (diff)
Auto-update from upstream
Diffstat (limited to 'include/asm-ppc64')
46 files changed, 541 insertions, 3496 deletions
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
deleted file mode 100644
index dbfa42ef4a99..000000000000
--- a/include/asm-ppc64/bitops.h
+++ /dev/null
@@ -1,360 +0,0 @@
1 | /* | ||
2 | * PowerPC64 atomic bit operations. | ||
3 | * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner, | ||
4 | * Anton Blanchard | ||
5 | * | ||
6 | * Originally taken from the 32b PPC code. Modified to use 64b values for | ||
7 | * the various counters & memory references. | ||
8 | * | ||
9 | * Bitops are odd when viewed on big-endian systems. They were designed | ||
10 | * on little endian so the size of the bitset doesn't matter (low order bytes | ||
11 | * come first) as long as the bit in question is valid. | ||
12 | * | ||
13 | * Bits are "tested" often using the C expression (val & (1<<nr)) so we do | ||
14 | * our best to stay compatible with that. The assumption is that val will | ||
15 | * be unsigned long for such tests. As such, we assume the bits are stored | ||
16 | * as an array of unsigned long (the usual case is a single unsigned long, | ||
17 | * of course). Here's an example bitset with bit numbering: | ||
18 | * | ||
19 | * |63..........0|127........64|191.......128|255.......192| | ||
20 | * | ||
21 | * This leads to a problem. If an int, short or char is passed as a bitset | ||
22 | * it will be a bad memory reference since we want to store in chunks | ||
23 | * of unsigned long (64 bits here) size. | ||
24 | * | ||
25 | * There are a few little-endian macros used mostly for filesystem bitmaps; | ||
26 | * these work on similar bit array layouts, but byte-oriented: | ||
27 | * | ||
28 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| | ||
29 | * | ||
30 | * The main difference is that bits 3-5 in the bit number field need to be | ||
31 | * reversed compared to the big-endian bit fields. This can be achieved | ||
32 | * by XOR with 0b111000 (0x38). | ||
33 | * | ||
34 | * This program is free software; you can redistribute it and/or | ||
35 | * modify it under the terms of the GNU General Public License | ||
36 | * as published by the Free Software Foundation; either version | ||
37 | * 2 of the License, or (at your option) any later version. | ||
38 | */ | ||
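As a quick illustration of the translation described in the comment above, here is a standalone sketch (not part of the original header; it assumes 64-bit unsigned long, as on ppc64) checking that bit nr of the byte-oriented little-endian view matches native bit nr ^ 0x38 of a big-endian doubleword:

    #include <assert.h>

    int main(void)
    {
            /* Big-endian in-memory image of the 64-bit value 0x0123456789abcdef:
             * most significant byte stored first. */
            const unsigned char mem[8] = { 0x01, 0x23, 0x45, 0x67,
                                           0x89, 0xab, 0xcd, 0xef };
            const unsigned long val = 0x0123456789abcdefUL;
            unsigned long nr;

            for (nr = 0; nr < 64; nr++) {
                    int le_bit     = (mem[nr >> 3] >> (nr & 7)) & 1;   /* byte-oriented view */
                    int native_bit = (int)((val >> (nr ^ 0x38)) & 1);  /* bits 3-5 flipped   */
                    assert(le_bit == native_bit);
            }
            return 0;
    }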
39 | |||
40 | #ifndef _PPC64_BITOPS_H | ||
41 | #define _PPC64_BITOPS_H | ||
42 | |||
43 | #ifdef __KERNEL__ | ||
44 | |||
45 | #include <asm/synch.h> | ||
46 | |||
47 | /* | ||
48 | * clear_bit doesn't imply a memory barrier | ||
49 | */ | ||
50 | #define smp_mb__before_clear_bit() smp_mb() | ||
51 | #define smp_mb__after_clear_bit() smp_mb() | ||
52 | |||
53 | static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr) | ||
54 | { | ||
55 | return (1UL & (addr[nr >> 6] >> (nr & 63))); | ||
56 | } | ||
57 | |||
58 | static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr) | ||
59 | { | ||
60 | unsigned long old; | ||
61 | unsigned long mask = 1UL << (nr & 0x3f); | ||
62 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
63 | |||
64 | __asm__ __volatile__( | ||
65 | "1: ldarx %0,0,%3 # set_bit\n\ | ||
66 | or %0,%0,%2\n\ | ||
67 | stdcx. %0,0,%3\n\ | ||
68 | bne- 1b" | ||
69 | : "=&r" (old), "=m" (*p) | ||
70 | : "r" (mask), "r" (p), "m" (*p) | ||
71 | : "cc"); | ||
72 | } | ||
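For readers less familiar with the ldarx/stdcx. (load-reserve/store-conditional) idiom: the loop above retries the read-modify-write until the conditional store succeeds, giving an atomic OR with no ordering guarantee. A hedged sketch of the same operation using a GCC builtin, illustrative only and not the kernel implementation:

    static inline void set_bit_sketch(unsigned long nr, volatile unsigned long *addr)
    {
            unsigned long mask = 1UL << (nr & 0x3f);                  /* bit within the doubleword */
            unsigned long *p = ((unsigned long *)addr) + (nr >> 6);   /* which doubleword          */

            /* Atomic OR; relaxed ordering mirrors set_bit()'s lack of barriers. */
            __atomic_fetch_or(p, mask, __ATOMIC_RELAXED);
    }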
73 | |||
74 | static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
75 | { | ||
76 | unsigned long old; | ||
77 | unsigned long mask = 1UL << (nr & 0x3f); | ||
78 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
79 | |||
80 | __asm__ __volatile__( | ||
81 | "1: ldarx %0,0,%3 # clear_bit\n\ | ||
82 | andc %0,%0,%2\n\ | ||
83 | stdcx. %0,0,%3\n\ | ||
84 | bne- 1b" | ||
85 | : "=&r" (old), "=m" (*p) | ||
86 | : "r" (mask), "r" (p), "m" (*p) | ||
87 | : "cc"); | ||
88 | } | ||
89 | |||
90 | static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr) | ||
91 | { | ||
92 | unsigned long old; | ||
93 | unsigned long mask = 1UL << (nr & 0x3f); | ||
94 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
95 | |||
96 | __asm__ __volatile__( | ||
97 | "1: ldarx %0,0,%3 # change_bit\n\ | ||
98 | xor %0,%0,%2\n\ | ||
99 | stdcx. %0,0,%3\n\ | ||
100 | bne- 1b" | ||
101 | : "=&r" (old), "=m" (*p) | ||
102 | : "r" (mask), "r" (p), "m" (*p) | ||
103 | : "cc"); | ||
104 | } | ||
105 | |||
106 | static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | ||
107 | { | ||
108 | unsigned long old, t; | ||
109 | unsigned long mask = 1UL << (nr & 0x3f); | ||
110 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
111 | |||
112 | __asm__ __volatile__( | ||
113 | EIEIO_ON_SMP | ||
114 | "1: ldarx %0,0,%3 # test_and_set_bit\n\ | ||
115 | or %1,%0,%2 \n\ | ||
116 | stdcx. %1,0,%3 \n\ | ||
117 | bne- 1b" | ||
118 | ISYNC_ON_SMP | ||
119 | : "=&r" (old), "=&r" (t) | ||
120 | : "r" (mask), "r" (p) | ||
121 | : "cc", "memory"); | ||
122 | |||
123 | return (old & mask) != 0; | ||
124 | } | ||
125 | |||
126 | static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
127 | { | ||
128 | unsigned long old, t; | ||
129 | unsigned long mask = 1UL << (nr & 0x3f); | ||
130 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
131 | |||
132 | __asm__ __volatile__( | ||
133 | EIEIO_ON_SMP | ||
134 | "1: ldarx %0,0,%3 # test_and_clear_bit\n\ | ||
135 | andc %1,%0,%2\n\ | ||
136 | stdcx. %1,0,%3\n\ | ||
137 | bne- 1b" | ||
138 | ISYNC_ON_SMP | ||
139 | : "=&r" (old), "=&r" (t) | ||
140 | : "r" (mask), "r" (p) | ||
141 | : "cc", "memory"); | ||
142 | |||
143 | return (old & mask) != 0; | ||
144 | } | ||
145 | |||
146 | static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
147 | { | ||
148 | unsigned long old, t; | ||
149 | unsigned long mask = 1UL << (nr & 0x3f); | ||
150 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
151 | |||
152 | __asm__ __volatile__( | ||
153 | EIEIO_ON_SMP | ||
154 | "1: ldarx %0,0,%3 # test_and_change_bit\n\ | ||
155 | xor %1,%0,%2\n\ | ||
156 | stdcx. %1,0,%3\n\ | ||
157 | bne- 1b" | ||
158 | ISYNC_ON_SMP | ||
159 | : "=&r" (old), "=&r" (t) | ||
160 | : "r" (mask), "r" (p) | ||
161 | : "cc", "memory"); | ||
162 | |||
163 | return (old & mask) != 0; | ||
164 | } | ||
165 | |||
166 | static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | ||
167 | { | ||
168 | unsigned long old; | ||
169 | |||
170 | __asm__ __volatile__( | ||
171 | "1: ldarx %0,0,%3 # set_bit\n\ | ||
172 | or %0,%0,%2\n\ | ||
173 | stdcx. %0,0,%3\n\ | ||
174 | bne- 1b" | ||
175 | : "=&r" (old), "=m" (*addr) | ||
176 | : "r" (mask), "r" (addr), "m" (*addr) | ||
177 | : "cc"); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * non-atomic versions | ||
182 | */ | ||
183 | static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr) | ||
184 | { | ||
185 | unsigned long mask = 1UL << (nr & 0x3f); | ||
186 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
187 | |||
188 | *p |= mask; | ||
189 | } | ||
190 | |||
191 | static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
192 | { | ||
193 | unsigned long mask = 1UL << (nr & 0x3f); | ||
194 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
195 | |||
196 | *p &= ~mask; | ||
197 | } | ||
198 | |||
199 | static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr) | ||
200 | { | ||
201 | unsigned long mask = 1UL << (nr & 0x3f); | ||
202 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
203 | |||
204 | *p ^= mask; | ||
205 | } | ||
206 | |||
207 | static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | ||
208 | { | ||
209 | unsigned long mask = 1UL << (nr & 0x3f); | ||
210 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
211 | unsigned long old = *p; | ||
212 | |||
213 | *p = old | mask; | ||
214 | return (old & mask) != 0; | ||
215 | } | ||
216 | |||
217 | static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
218 | { | ||
219 | unsigned long mask = 1UL << (nr & 0x3f); | ||
220 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
221 | unsigned long old = *p; | ||
222 | |||
223 | *p = old & ~mask; | ||
224 | return (old & mask) != 0; | ||
225 | } | ||
226 | |||
227 | static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
228 | { | ||
229 | unsigned long mask = 1UL << (nr & 0x3f); | ||
230 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
231 | unsigned long old = *p; | ||
232 | |||
233 | *p = old ^ mask; | ||
234 | return (old & mask) != 0; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the | ||
239 | * most significant (left-most) 1-bit in a double word. | ||
240 | */ | ||
241 | static __inline__ int __ilog2(unsigned long x) | ||
242 | { | ||
243 | int lz; | ||
244 | |||
245 | asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x)); | ||
246 | return 63 - lz; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Determines the bit position of the least significant (rightmost) 0 bit | ||
251 | * in the specified double word. The returned bit position will be zero-based, | ||
252 | * starting from the right side (63 - 0). | ||
253 | */ | ||
254 | static __inline__ unsigned long ffz(unsigned long x) | ||
255 | { | ||
256 | /* no zero exists anywhere in the 8 byte area. */ | ||
257 | if ((x = ~x) == 0) | ||
258 | return 64; | ||
259 | |||
260 | /* | ||
261 | * Calculate the bit position of the least significant '1' bit in x | ||
262 | * (since x has been changed this will actually be the least significant | ||
263 | * '0' bit in the original x). Note: (x & -x) gives us a mask that | ||
264 | * is the least significant (RIGHT-most) 1-bit of the value in x. | ||
265 | */ | ||
266 | return __ilog2(x & -x); | ||
267 | } | ||
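A standalone sketch of the same ffz() semantics, using a GCC builtin in place of cntlzd, with a few spot checks (assumes 64-bit unsigned long; not part of the original header):

    #include <assert.h>

    static int ilog2_sketch(unsigned long x)     /* caller must not pass 0 */
    {
            return 63 - __builtin_clzl(x);
    }

    static unsigned long ffz_sketch(unsigned long x)
    {
            if ((x = ~x) == 0)                   /* no zero bit anywhere */
                    return 64;
            return ilog2_sketch(x & -x);         /* lowest set bit of ~x */
    }

    int main(void)
    {
            assert(ffz_sketch(0x00ffUL) == 8);   /* bits 0-7 set, first zero is bit 8 */
            assert(ffz_sketch(0UL) == 0);        /* bit 0 is already zero */
            assert(ffz_sketch(~0UL) == 64);      /* all ones: no zero bit at all */
            return 0;
    }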
268 | |||
269 | static __inline__ int __ffs(unsigned long x) | ||
270 | { | ||
271 | return __ilog2(x & -x); | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * ffs: find first bit set. This is defined the same way as | ||
276 | * the libc and compiler builtin ffs routines, therefore | ||
277 | * differs in spirit from the above ffz (man ffs). | ||
278 | */ | ||
279 | static __inline__ int ffs(int x) | ||
280 | { | ||
281 | unsigned long i = (unsigned long)x; | ||
282 | return __ilog2(i & -i) + 1; | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * fls: find last (most-significant) bit set. | ||
287 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
288 | */ | ||
289 | #define fls(x) generic_fls(x) | ||
290 | |||
291 | /* | ||
292 | * hweightN: returns the hamming weight (i.e. the number | ||
293 | * of bits set) of a N-bit word | ||
294 | */ | ||
295 | #define hweight64(x) generic_hweight64(x) | ||
296 | #define hweight32(x) generic_hweight32(x) | ||
297 | #define hweight16(x) generic_hweight16(x) | ||
298 | #define hweight8(x) generic_hweight8(x) | ||
299 | |||
300 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
301 | #define find_first_zero_bit(addr, size) \ | ||
302 | find_next_zero_bit((addr), (size), 0) | ||
303 | |||
304 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
305 | #define find_first_bit(addr, size) \ | ||
306 | find_next_bit((addr), (size), 0) | ||
307 | |||
308 | extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
309 | #define find_first_zero_le_bit(addr, size) \ | ||
310 | find_next_zero_le_bit((addr), (size), 0) | ||
311 | |||
312 | static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr) | ||
313 | { | ||
314 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
315 | return (ADDR[nr >> 3] >> (nr & 7)) & 1; | ||
316 | } | ||
317 | |||
318 | #define test_and_clear_le_bit(nr, addr) \ | ||
319 | test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
320 | #define test_and_set_le_bit(nr, addr) \ | ||
321 | test_and_set_bit((nr) ^ 0x38, (addr)) | ||
322 | |||
323 | /* | ||
324 | * non-atomic versions | ||
325 | */ | ||
326 | |||
327 | #define __set_le_bit(nr, addr) \ | ||
328 | __set_bit((nr) ^ 0x38, (addr)) | ||
329 | #define __clear_le_bit(nr, addr) \ | ||
330 | __clear_bit((nr) ^ 0x38, (addr)) | ||
331 | #define __test_and_clear_le_bit(nr, addr) \ | ||
332 | __test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
333 | #define __test_and_set_le_bit(nr, addr) \ | ||
334 | __test_and_set_bit((nr) ^ 0x38, (addr)) | ||
335 | |||
336 | #define ext2_set_bit(nr,addr) \ | ||
337 | __test_and_set_le_bit((nr), (unsigned long*)addr) | ||
338 | #define ext2_clear_bit(nr, addr) \ | ||
339 | __test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
340 | |||
341 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
342 | test_and_set_le_bit((nr), (unsigned long*)addr) | ||
343 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
344 | test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
345 | |||
346 | |||
347 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
348 | #define ext2_find_first_zero_bit(addr, size) \ | ||
349 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
350 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
351 | find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
352 | |||
353 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
354 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
355 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
356 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
357 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
358 | |||
359 | #endif /* __KERNEL__ */ | ||
360 | #endif /* _PPC64_BITOPS_H */ | ||
diff --git a/include/asm-ppc64/dart.h b/include/asm-ppc64/dart.h
deleted file mode 100644
index cdf8a2dec05f..000000000000
--- a/include/asm-ppc64/dart.h
+++ /dev/null
@@ -1,59 +0,0 @@
1 | /* | ||
2 | * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_DART_H | ||
20 | #define _ASM_DART_H | ||
21 | |||
22 | |||
23 | /* physical base of DART registers */ | ||
24 | #define DART_BASE 0xf8033000UL | ||
25 | |||
26 | /* Offset from base to control register */ | ||
27 | #define DARTCNTL 0 | ||
28 | /* Offset from base to exception register */ | ||
29 | #define DARTEXCP 0x10 | ||
30 | /* Offset from base to TLB tag registers */ | ||
31 | #define DARTTAG 0x1000 | ||
32 | |||
33 | |||
34 | /* Control Register fields */ | ||
35 | |||
36 | /* base address of table (pfn) */ | ||
37 | #define DARTCNTL_BASE_MASK 0xfffff | ||
38 | #define DARTCNTL_BASE_SHIFT 12 | ||
39 | |||
40 | #define DARTCNTL_FLUSHTLB 0x400 | ||
41 | #define DARTCNTL_ENABLE 0x200 | ||
42 | |||
43 | /* size of table in pages */ | ||
44 | #define DARTCNTL_SIZE_MASK 0x1ff | ||
45 | #define DARTCNTL_SIZE_SHIFT 0 | ||
46 | |||
47 | |||
48 | /* DART table fields */ | ||
49 | |||
50 | #define DARTMAP_VALID 0x80000000 | ||
51 | #define DARTMAP_RPNMASK 0x00ffffff | ||
52 | |||
53 | |||
54 | #define DART_PAGE_SHIFT 12 | ||
55 | #define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT) | ||
56 | #define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT) | ||
57 | |||
58 | |||
59 | #endif | ||
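The field definitions above suggest a control-register layout with the table's page frame number in bits 12-31, its size in DART pages in bits 0-8, enable at bit 9 and TLB flush at bit 10. A hypothetical composition helper consistent with those definitions (illustrative only; the real programming sequence lives in the platform IOMMU code, not in this header):

    static unsigned long dartcntl_compose(unsigned long table_pfn,
                                          unsigned long table_pages)
    {
            return ((table_pfn & DARTCNTL_BASE_MASK) << DARTCNTL_BASE_SHIFT) |
                   ((table_pages & DARTCNTL_SIZE_MASK) << DARTCNTL_SIZE_SHIFT) |
                   DARTCNTL_ENABLE;
    }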
diff --git a/include/asm-ppc64/futex.h b/include/asm-ppc64/futex.h
deleted file mode 100644
index 266b460de44e..000000000000
--- a/include/asm-ppc64/futex.h
+++ /dev/null
@@ -1,83 +0,0 @@
1 | #ifndef _ASM_FUTEX_H | ||
2 | #define _ASM_FUTEX_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/futex.h> | ||
7 | #include <asm/errno.h> | ||
8 | #include <asm/synch.h> | ||
9 | #include <asm/uaccess.h> | ||
10 | |||
11 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | ||
12 | __asm__ __volatile (SYNC_ON_SMP \ | ||
13 | "1: lwarx %0,0,%2\n" \ | ||
14 | insn \ | ||
15 | "2: stwcx. %1,0,%2\n\ | ||
16 | bne- 1b\n\ | ||
17 | li %1,0\n\ | ||
18 | 3: .section .fixup,\"ax\"\n\ | ||
19 | 4: li %1,%3\n\ | ||
20 | b 3b\n\ | ||
21 | .previous\n\ | ||
22 | .section __ex_table,\"a\"\n\ | ||
23 | .align 3\n\ | ||
24 | .llong 1b,4b,2b,4b\n\ | ||
25 | .previous" \ | ||
26 | : "=&r" (oldval), "=&r" (ret) \ | ||
27 | : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \ | ||
28 | : "cr0", "memory") | ||
29 | |||
30 | static inline int | ||
31 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | ||
32 | { | ||
33 | int op = (encoded_op >> 28) & 7; | ||
34 | int cmp = (encoded_op >> 24) & 15; | ||
35 | int oparg = (encoded_op << 8) >> 20; | ||
36 | int cmparg = (encoded_op << 20) >> 20; | ||
37 | int oldval = 0, ret; | ||
38 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
39 | oparg = 1 << oparg; | ||
40 | |||
41 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | ||
42 | return -EFAULT; | ||
43 | |||
44 | inc_preempt_count(); | ||
45 | |||
46 | switch (op) { | ||
47 | case FUTEX_OP_SET: | ||
48 | __futex_atomic_op("", ret, oldval, uaddr, oparg); | ||
49 | break; | ||
50 | case FUTEX_OP_ADD: | ||
51 | __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg); | ||
52 | break; | ||
53 | case FUTEX_OP_OR: | ||
54 | __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg); | ||
55 | break; | ||
56 | case FUTEX_OP_ANDN: | ||
57 | __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg); | ||
58 | break; | ||
59 | case FUTEX_OP_XOR: | ||
60 | __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg); | ||
61 | break; | ||
62 | default: | ||
63 | ret = -ENOSYS; | ||
64 | } | ||
65 | |||
66 | dec_preempt_count(); | ||
67 | |||
68 | if (!ret) { | ||
69 | switch (cmp) { | ||
70 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | ||
71 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | ||
72 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | ||
73 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | ||
74 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | ||
75 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | ||
76 | default: ret = -ENOSYS; | ||
77 | } | ||
78 | } | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | #endif | ||
83 | #endif | ||
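For reference, futex_atomic_op_inuser() above unpacks encoded_op as: op in bits 28-31, cmp in bits 24-27, oparg in bits 12-23 and cmparg in bits 0-11 (the shift pairs in the kernel code additionally sign-extend the two 12-bit fields). A standalone sketch of that packing, using plain numeric values rather than the FUTEX_OP_* names:

    #include <assert.h>

    static int encode_futex_op(int op, int oparg, int cmp, int cmparg)
    {
            return ((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
                   ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
    }

    int main(void)
    {
            /* op 1 with operand 1, compare-op 0 with comparand 0; these happen
             * to be FUTEX_OP_ADD / FUTEX_OP_CMP_EQ in linux/futex.h. */
            int encoded_op = encode_futex_op(1, 1, 0, 0);

            assert(((encoded_op >> 28) & 7) == 1);       /* op     */
            assert(((encoded_op >> 24) & 15) == 0);      /* cmp    */
            assert(((encoded_op >> 12) & 0xfff) == 1);   /* oparg  */
            assert((encoded_op & 0xfff) == 0);           /* cmparg */
            return 0;
    }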
diff --git a/include/asm-ppc64/iSeries/HvCall.h b/include/asm-ppc64/iSeries/HvCall.h
deleted file mode 100644
index c3f19475c0d9..000000000000
--- a/include/asm-ppc64/iSeries/HvCall.h
+++ /dev/null
@@ -1,113 +0,0 @@
1 | /* | ||
2 | * HvCall.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | /* | ||
20 | * This file contains the "hypervisor call" interface which is used to | ||
21 | * drive the hypervisor from the OS. | ||
22 | */ | ||
23 | #ifndef _HVCALL_H | ||
24 | #define _HVCALL_H | ||
25 | |||
26 | #include <asm/iSeries/HvCallSc.h> | ||
27 | #include <asm/iSeries/HvTypes.h> | ||
28 | #include <asm/paca.h> | ||
29 | |||
30 | /* Type of yield for HvCallBaseYieldProcessor */ | ||
31 | #define HvCall_YieldTimed 0 /* Yield until specified time (tb) */ | ||
32 | #define HvCall_YieldToActive 1 /* Yield until all active procs have run */ | ||
33 | #define HvCall_YieldToProc 2 /* Yield until the specified processor has run */ | ||
34 | |||
35 | /* interrupt masks for setEnabledInterrupts */ | ||
36 | #define HvCall_MaskIPI 0x00000001 | ||
37 | #define HvCall_MaskLpEvent 0x00000002 | ||
38 | #define HvCall_MaskLpProd 0x00000004 | ||
39 | #define HvCall_MaskTimeout 0x00000008 | ||
40 | |||
41 | /* Log buffer formats */ | ||
42 | #define HvCall_LogBuffer_ASCII 0 | ||
43 | #define HvCall_LogBuffer_EBCDIC 1 | ||
44 | |||
45 | #define HvCallBaseAckDeferredInts HvCallBase + 0 | ||
46 | #define HvCallBaseCpmPowerOff HvCallBase + 1 | ||
47 | #define HvCallBaseGetHwPatch HvCallBase + 2 | ||
48 | #define HvCallBaseReIplSpAttn HvCallBase + 3 | ||
49 | #define HvCallBaseSetASR HvCallBase + 4 | ||
50 | #define HvCallBaseSetASRAndRfi HvCallBase + 5 | ||
51 | #define HvCallBaseSetIMR HvCallBase + 6 | ||
52 | #define HvCallBaseSendIPI HvCallBase + 7 | ||
53 | #define HvCallBaseTerminateMachine HvCallBase + 8 | ||
54 | #define HvCallBaseTerminateMachineSrc HvCallBase + 9 | ||
55 | #define HvCallBaseProcessPlicInterrupts HvCallBase + 10 | ||
56 | #define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11 | ||
57 | #define HvCallBaseSetVirtualSIT HvCallBase + 12 | ||
58 | #define HvCallBaseVaryOffThisProcessor HvCallBase + 13 | ||
59 | #define HvCallBaseVaryOffMemoryChunk HvCallBase + 14 | ||
60 | #define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15 | ||
61 | #define HvCallBaseSendLpProd HvCallBase + 16 | ||
62 | #define HvCallBaseSetEnabledInterrupts HvCallBase + 17 | ||
63 | #define HvCallBaseYieldProcessor HvCallBase + 18 | ||
64 | #define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19 | ||
65 | #define HvCallBaseSetVirtualDecr HvCallBase + 20 | ||
66 | #define HvCallBaseClearLogBuffer HvCallBase + 21 | ||
67 | #define HvCallBaseGetLogBufferCodePage HvCallBase + 22 | ||
68 | #define HvCallBaseGetLogBufferFormat HvCallBase + 23 | ||
69 | #define HvCallBaseGetLogBufferLength HvCallBase + 24 | ||
70 | #define HvCallBaseReadLogBuffer HvCallBase + 25 | ||
71 | #define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26 | ||
72 | #define HvCallBaseWriteLogBuffer HvCallBase + 27 | ||
73 | #define HvCallBaseRouter28 HvCallBase + 28 | ||
74 | #define HvCallBaseRouter29 HvCallBase + 29 | ||
75 | #define HvCallBaseRouter30 HvCallBase + 30 | ||
76 | #define HvCallBaseSetDebugBus HvCallBase + 31 | ||
77 | |||
78 | #define HvCallCcSetDABR HvCallCc + 7 | ||
79 | |||
80 | static inline void HvCall_setVirtualDecr(void) | ||
81 | { | ||
82 | /* | ||
83 | * Ignore any error return codes - most likely means that the | ||
84 | * target value for the LP has been increased and this vary off | ||
85 | * would bring us below the new target. | ||
86 | */ | ||
87 | HvCall0(HvCallBaseSetVirtualDecr); | ||
88 | } | ||
89 | |||
90 | static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm) | ||
91 | { | ||
92 | HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm); | ||
93 | } | ||
94 | |||
95 | static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts) | ||
96 | { | ||
97 | HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts); | ||
98 | } | ||
99 | |||
100 | static inline void HvCall_setLogBufferFormatAndCodepage(int format, | ||
101 | u32 codePage) | ||
102 | { | ||
103 | HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage); | ||
104 | } | ||
105 | |||
106 | extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen); | ||
107 | |||
108 | static inline void HvCall_sendIPI(struct paca_struct *targetPaca) | ||
109 | { | ||
110 | HvCall1(HvCallBaseSendIPI, targetPaca->paca_index); | ||
111 | } | ||
112 | |||
113 | #endif /* _HVCALL_H */ | ||
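A hypothetical caller of the wrappers above simply ORs the HvCall_Mask* bits together (illustrative only; which sources the platform setup code actually enables is decided elsewhere):

    static inline void example_enable_lp_interrupts(void)
    {
            /* Enable IPIs, LP events, LP prod and the timeout interrupt. */
            HvCall_setEnabledInterrupts(HvCall_MaskIPI |
                                        HvCall_MaskLpEvent |
                                        HvCall_MaskLpProd |
                                        HvCall_MaskTimeout);
    }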
diff --git a/include/asm-ppc64/iSeries/HvCallEvent.h b/include/asm-ppc64/iSeries/HvCallEvent.h
deleted file mode 100644
index 5d9a327d0122..000000000000
--- a/include/asm-ppc64/iSeries/HvCallEvent.h
+++ /dev/null
@@ -1,253 +0,0 @@
1 | /* | ||
2 | * HvCallEvent.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | /* | ||
20 | * This file contains the "hypervisor call" interface which is used to | ||
21 | * drive the hypervisor from the OS. | ||
22 | */ | ||
23 | #ifndef _HVCALLEVENT_H | ||
24 | #define _HVCALLEVENT_H | ||
25 | |||
26 | #include <asm/iSeries/HvCallSc.h> | ||
27 | #include <asm/iSeries/HvTypes.h> | ||
28 | #include <asm/abs_addr.h> | ||
29 | |||
30 | struct HvLpEvent; | ||
31 | |||
32 | typedef u8 HvLpEvent_Type; | ||
33 | typedef u8 HvLpEvent_AckInd; | ||
34 | typedef u8 HvLpEvent_AckType; | ||
35 | |||
36 | struct HvCallEvent_PackedParms { | ||
37 | u8 xAckType:1; | ||
38 | u8 xAckInd:1; | ||
39 | u8 xRsvd:1; | ||
40 | u8 xTargetLp:5; | ||
41 | u8 xType; | ||
42 | u16 xSubtype; | ||
43 | HvLpInstanceId xSourceInstId; | ||
44 | HvLpInstanceId xTargetInstId; | ||
45 | }; | ||
46 | |||
47 | typedef u8 HvLpDma_Direction; | ||
48 | typedef u8 HvLpDma_AddressType; | ||
49 | |||
50 | struct HvCallEvent_PackedDmaParms { | ||
51 | u8 xDirection:1; | ||
52 | u8 xLocalAddrType:1; | ||
53 | u8 xRemoteAddrType:1; | ||
54 | u8 xRsvd1:5; | ||
55 | HvLpIndex xRemoteLp; | ||
56 | u8 xType; | ||
57 | u8 xRsvd2; | ||
58 | HvLpInstanceId xLocalInstId; | ||
59 | HvLpInstanceId xRemoteInstId; | ||
60 | }; | ||
61 | |||
62 | typedef u64 HvLpEvent_Rc; | ||
63 | typedef u64 HvLpDma_Rc; | ||
64 | |||
65 | #define HvCallEventAckLpEvent HvCallEvent + 0 | ||
66 | #define HvCallEventCancelLpEvent HvCallEvent + 1 | ||
67 | #define HvCallEventCloseLpEventPath HvCallEvent + 2 | ||
68 | #define HvCallEventDmaBufList HvCallEvent + 3 | ||
69 | #define HvCallEventDmaSingle HvCallEvent + 4 | ||
70 | #define HvCallEventDmaToSp HvCallEvent + 5 | ||
71 | #define HvCallEventGetOverflowLpEvents HvCallEvent + 6 | ||
72 | #define HvCallEventGetSourceLpInstanceId HvCallEvent + 7 | ||
73 | #define HvCallEventGetTargetLpInstanceId HvCallEvent + 8 | ||
74 | #define HvCallEventOpenLpEventPath HvCallEvent + 9 | ||
75 | #define HvCallEventSetLpEventStack HvCallEvent + 10 | ||
76 | #define HvCallEventSignalLpEvent HvCallEvent + 11 | ||
77 | #define HvCallEventSignalLpEventParms HvCallEvent + 12 | ||
78 | #define HvCallEventSetInterLpQueueIndex HvCallEvent + 13 | ||
79 | #define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14 | ||
80 | #define HvCallEventRouter15 HvCallEvent + 15 | ||
81 | |||
82 | static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex) | ||
83 | { | ||
84 | HvCall1(HvCallEventGetOverflowLpEvents, queueIndex); | ||
85 | } | ||
86 | |||
87 | static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex) | ||
88 | { | ||
89 | HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex); | ||
90 | } | ||
91 | |||
92 | static inline void HvCallEvent_setLpEventStack(u8 queueIndex, | ||
93 | char *eventStackAddr, u32 eventStackSize) | ||
94 | { | ||
95 | u64 abs_addr; | ||
96 | |||
97 | abs_addr = virt_to_abs(eventStackAddr); | ||
98 | HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr, | ||
99 | eventStackSize); | ||
100 | } | ||
101 | |||
102 | static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex, | ||
103 | u16 lpLogicalProcIndex) | ||
104 | { | ||
105 | HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex, | ||
106 | lpLogicalProcIndex); | ||
107 | } | ||
108 | |||
109 | static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event) | ||
110 | { | ||
111 | u64 abs_addr; | ||
112 | |||
113 | #ifdef DEBUG_SENDEVENT | ||
114 | printk("HvCallEvent_signalLpEvent: *event = %016lx\n ", | ||
115 | (unsigned long)event); | ||
116 | #endif | ||
117 | abs_addr = virt_to_abs(event); | ||
118 | return HvCall1(HvCallEventSignalLpEvent, abs_addr); | ||
119 | } | ||
120 | |||
121 | static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, | ||
122 | HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd, | ||
123 | HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId, | ||
124 | HvLpInstanceId targetInstanceId, u64 correlationToken, | ||
125 | u64 eventData1, u64 eventData2, u64 eventData3, | ||
126 | u64 eventData4, u64 eventData5) | ||
127 | { | ||
128 | /* Pack the misc bits into a single Dword to pass to PLIC */ | ||
129 | union { | ||
130 | struct HvCallEvent_PackedParms parms; | ||
131 | u64 dword; | ||
132 | } packed; | ||
133 | packed.parms.xAckType = ackType; | ||
134 | packed.parms.xAckInd = ackInd; | ||
135 | packed.parms.xRsvd = 0; | ||
136 | packed.parms.xTargetLp = targetLp; | ||
137 | packed.parms.xType = type; | ||
138 | packed.parms.xSubtype = subtype; | ||
139 | packed.parms.xSourceInstId = sourceInstanceId; | ||
140 | packed.parms.xTargetInstId = targetInstanceId; | ||
141 | |||
142 | return HvCall7(HvCallEventSignalLpEventParms, packed.dword, | ||
143 | correlationToken, eventData1, eventData2, | ||
144 | eventData3, eventData4, eventData5); | ||
145 | } | ||
146 | |||
147 | static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event) | ||
148 | { | ||
149 | u64 abs_addr; | ||
150 | |||
151 | abs_addr = virt_to_abs(event); | ||
152 | return HvCall1(HvCallEventAckLpEvent, abs_addr); | ||
153 | } | ||
154 | |||
155 | static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event) | ||
156 | { | ||
157 | u64 abs_addr; | ||
158 | |||
159 | abs_addr = virt_to_abs(event); | ||
160 | return HvCall1(HvCallEventCancelLpEvent, abs_addr); | ||
161 | } | ||
162 | |||
163 | static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId( | ||
164 | HvLpIndex targetLp, HvLpEvent_Type type) | ||
165 | { | ||
166 | return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type); | ||
167 | } | ||
168 | |||
169 | static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId( | ||
170 | HvLpIndex targetLp, HvLpEvent_Type type) | ||
171 | { | ||
172 | return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type); | ||
173 | } | ||
174 | |||
175 | static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp, | ||
176 | HvLpEvent_Type type) | ||
177 | { | ||
178 | HvCall2(HvCallEventOpenLpEventPath, targetLp, type); | ||
179 | } | ||
180 | |||
181 | static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp, | ||
182 | HvLpEvent_Type type) | ||
183 | { | ||
184 | HvCall2(HvCallEventCloseLpEventPath, targetLp, type); | ||
185 | } | ||
186 | |||
187 | static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type, | ||
188 | HvLpIndex remoteLp, HvLpDma_Direction direction, | ||
189 | HvLpInstanceId localInstanceId, | ||
190 | HvLpInstanceId remoteInstanceId, | ||
191 | HvLpDma_AddressType localAddressType, | ||
192 | HvLpDma_AddressType remoteAddressType, | ||
193 | /* Do these need to be converted to absolute addresses? */ | ||
194 | u64 localBufList, u64 remoteBufList, u32 transferLength) | ||
195 | { | ||
196 | /* Pack the misc bits into a single Dword to pass to PLIC */ | ||
197 | union { | ||
198 | struct HvCallEvent_PackedDmaParms parms; | ||
199 | u64 dword; | ||
200 | } packed; | ||
201 | |||
202 | packed.parms.xDirection = direction; | ||
203 | packed.parms.xLocalAddrType = localAddressType; | ||
204 | packed.parms.xRemoteAddrType = remoteAddressType; | ||
205 | packed.parms.xRsvd1 = 0; | ||
206 | packed.parms.xRemoteLp = remoteLp; | ||
207 | packed.parms.xType = type; | ||
208 | packed.parms.xRsvd2 = 0; | ||
209 | packed.parms.xLocalInstId = localInstanceId; | ||
210 | packed.parms.xRemoteInstId = remoteInstanceId; | ||
211 | |||
212 | return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList, | ||
213 | remoteBufList, transferLength); | ||
214 | } | ||
215 | |||
216 | static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, | ||
217 | HvLpIndex remoteLp, HvLpDma_Direction direction, | ||
218 | HvLpInstanceId localInstanceId, | ||
219 | HvLpInstanceId remoteInstanceId, | ||
220 | HvLpDma_AddressType localAddressType, | ||
221 | HvLpDma_AddressType remoteAddressType, | ||
222 | u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength) | ||
223 | { | ||
224 | /* Pack the misc bits into a single Dword to pass to PLIC */ | ||
225 | union { | ||
226 | struct HvCallEvent_PackedDmaParms parms; | ||
227 | u64 dword; | ||
228 | } packed; | ||
229 | |||
230 | packed.parms.xDirection = direction; | ||
231 | packed.parms.xLocalAddrType = localAddressType; | ||
232 | packed.parms.xRemoteAddrType = remoteAddressType; | ||
233 | packed.parms.xRsvd1 = 0; | ||
234 | packed.parms.xRemoteLp = remoteLp; | ||
235 | packed.parms.xType = type; | ||
236 | packed.parms.xRsvd2 = 0; | ||
237 | packed.parms.xLocalInstId = localInstanceId; | ||
238 | packed.parms.xRemoteInstId = remoteInstanceId; | ||
239 | |||
240 | return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword, | ||
241 | localAddrOrTce, remoteAddrOrTce, transferLength); | ||
242 | } | ||
243 | |||
244 | static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote, | ||
245 | u32 length, HvLpDma_Direction dir) | ||
246 | { | ||
247 | u64 abs_addr; | ||
248 | |||
249 | abs_addr = virt_to_abs(local); | ||
250 | return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir); | ||
251 | } | ||
252 | |||
253 | #endif /* _HVCALLEVENT_H */ | ||
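Both packed-parameter structures above are expected to occupy exactly the single u64 they are unioned with before being handed to PLIC. A compile-time size check, written with a plain array trick so the sketch stays self-contained (the usual kernel idiom would be BUILD_BUG_ON):

    /* Fails to compile if either packed-parameter struct is not 8 bytes. */
    typedef char packed_parms_fits_u64[
            sizeof(struct HvCallEvent_PackedParms) == sizeof(u64) ? 1 : -1];
    typedef char packed_dma_parms_fits_u64[
            sizeof(struct HvCallEvent_PackedDmaParms) == sizeof(u64) ? 1 : -1];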
diff --git a/include/asm-ppc64/iSeries/HvCallSc.h b/include/asm-ppc64/iSeries/HvCallSc.h
deleted file mode 100644
index a62cef3822f9..000000000000
--- a/include/asm-ppc64/iSeries/HvCallSc.h
+++ /dev/null
@@ -1,51 +0,0 @@
1 | /* | ||
2 | * HvCallSc.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _HVCALLSC_H | ||
20 | #define _HVCALLSC_H | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | |||
24 | #define HvCallBase 0x8000000000000000ul | ||
25 | #define HvCallCc 0x8001000000000000ul | ||
26 | #define HvCallCfg 0x8002000000000000ul | ||
27 | #define HvCallEvent 0x8003000000000000ul | ||
28 | #define HvCallHpt 0x8004000000000000ul | ||
29 | #define HvCallPci 0x8005000000000000ul | ||
30 | #define HvCallSm 0x8007000000000000ul | ||
31 | #define HvCallXm 0x8009000000000000ul | ||
32 | |||
33 | extern u64 HvCall0(u64); | ||
34 | extern u64 HvCall1(u64, u64); | ||
35 | extern u64 HvCall2(u64, u64, u64); | ||
36 | extern u64 HvCall3(u64, u64, u64, u64); | ||
37 | extern u64 HvCall4(u64, u64, u64, u64, u64); | ||
38 | extern u64 HvCall5(u64, u64, u64, u64, u64, u64); | ||
39 | extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64); | ||
40 | extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64); | ||
41 | |||
42 | extern u64 HvCall0Ret16(u64, void *); | ||
43 | extern u64 HvCall1Ret16(u64, void *, u64); | ||
44 | extern u64 HvCall2Ret16(u64, void *, u64, u64); | ||
45 | extern u64 HvCall3Ret16(u64, void *, u64, u64, u64); | ||
46 | extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64); | ||
47 | extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64); | ||
48 | extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64); | ||
49 | extern u64 HvCall7Ret16(u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64); | ||
50 | |||
51 | #endif /* _HVCALLSC_H */ | ||
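Each HvCallXxxYyy function number used elsewhere in these headers is simply "component base + function index": the upper bits select the PLIC component, the low bits the function. A trivial standalone check of that composition:

    #include <assert.h>

    int main(void)
    {
            unsigned long long hv_call_event = 0x8003000000000000ull; /* HvCallEvent */

            /* HvCallEventSignalLpEvent is defined as HvCallEvent + 11. */
            assert(hv_call_event + 11 == 0x800300000000000bull);
            return 0;
    }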
diff --git a/include/asm-ppc64/iSeries/HvCallXm.h b/include/asm-ppc64/iSeries/HvCallXm.h
deleted file mode 100644
index 8b9ba608daaf..000000000000
--- a/include/asm-ppc64/iSeries/HvCallXm.h
+++ /dev/null
@@ -1,78 +0,0 @@
1 | /* | ||
2 | * This file contains the "hypervisor call" interface which is used to | ||
3 | * drive the hypervisor from SLIC. | ||
4 | */ | ||
5 | #ifndef _HVCALLXM_H | ||
6 | #define _HVCALLXM_H | ||
7 | |||
8 | #include <asm/iSeries/HvCallSc.h> | ||
9 | #include <asm/iSeries/HvTypes.h> | ||
10 | |||
11 | #define HvCallXmGetTceTableParms HvCallXm + 0 | ||
12 | #define HvCallXmTestBus HvCallXm + 1 | ||
13 | #define HvCallXmConnectBusUnit HvCallXm + 2 | ||
14 | #define HvCallXmLoadTod HvCallXm + 8 | ||
15 | #define HvCallXmTestBusUnit HvCallXm + 9 | ||
16 | #define HvCallXmSetTce HvCallXm + 11 | ||
17 | #define HvCallXmSetTces HvCallXm + 13 | ||
18 | |||
19 | /* | ||
20 | * Structure passed to HvCallXm_getTceTableParms | ||
21 | */ | ||
22 | struct iommu_table_cb { | ||
23 | unsigned long itc_busno; /* Bus number for this tce table */ | ||
24 | unsigned long itc_start; /* Will be NULL for secondary */ | ||
25 | unsigned long itc_totalsize; /* Size (in pages) of whole table */ | ||
26 | unsigned long itc_offset; /* Index into real tce table of the | ||
27 | start of our section */ | ||
28 | unsigned long itc_size; /* Size (in pages) of our section */ | ||
29 | unsigned long itc_index; /* Index of this tce table */ | ||
30 | unsigned short itc_maxtables; /* Max num of tables for partition */ | ||
31 | unsigned char itc_virtbus; /* Flag to indicate virtual bus */ | ||
32 | unsigned char itc_slotno; /* IOA Tce Slot Index */ | ||
33 | unsigned char itc_rsvd[4]; | ||
34 | }; | ||
35 | |||
36 | static inline void HvCallXm_getTceTableParms(u64 cb) | ||
37 | { | ||
38 | HvCall1(HvCallXmGetTceTableParms, cb); | ||
39 | } | ||
40 | |||
41 | static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce) | ||
42 | { | ||
43 | return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce); | ||
44 | } | ||
45 | |||
46 | static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset, | ||
47 | u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4) | ||
48 | { | ||
49 | return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces, | ||
50 | tce1, tce2, tce3, tce4); | ||
51 | } | ||
52 | |||
53 | static inline u64 HvCallXm_testBus(u16 busNumber) | ||
54 | { | ||
55 | return HvCall1(HvCallXmTestBus, busNumber); | ||
56 | } | ||
57 | |||
58 | static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber, | ||
59 | u8 deviceId) | ||
60 | { | ||
61 | return HvCall2(HvCallXmTestBusUnit, busNumber, | ||
62 | (subBusNumber << 8) | deviceId); | ||
63 | } | ||
64 | |||
65 | static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber, | ||
66 | u8 deviceId, u64 interruptToken) | ||
67 | { | ||
68 | return HvCall5(HvCallXmConnectBusUnit, busNumber, | ||
69 | (subBusNumber << 8) | deviceId, interruptToken, 0, | ||
70 | 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */); | ||
71 | } | ||
72 | |||
73 | static inline u64 HvCallXm_loadTod(void) | ||
74 | { | ||
75 | return HvCall0(HvCallXmLoadTod); | ||
76 | } | ||
77 | |||
78 | #endif /* _HVCALLXM_H */ | ||
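A hypothetical caller of HvCallXm_getTceTableParms() (the real one is the iSeries IOMMU setup code; the struct and call come from this header, virt_to_abs() from asm/abs_addr.h as already used by HvCallEvent.h):

    #include <asm/abs_addr.h>          /* virt_to_abs() */

    static void example_read_tce_parms(struct iommu_table_cb *parms)
    {
            /* PLIC fills in the control block passed by its absolute address. */
            HvCallXm_getTceTableParms(virt_to_abs(parms));

            /* parms->itc_busno, parms->itc_size, parms->itc_offset ... are now valid. */
    }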
diff --git a/include/asm-ppc64/iSeries/HvLpConfig.h b/include/asm-ppc64/iSeries/HvLpConfig.h
deleted file mode 100644
index f1cf1e70ca3c..000000000000
--- a/include/asm-ppc64/iSeries/HvLpConfig.h
+++ /dev/null
@@ -1,138 +0,0 @@
1 | /* | ||
2 | * HvLpConfig.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _HVLPCONFIG_H | ||
20 | #define _HVLPCONFIG_H | ||
21 | |||
22 | /* | ||
23 | * This file contains the interface to the LPAR configuration data | ||
24 | * to determine which resources should be allocated to each partition. | ||
25 | */ | ||
26 | |||
27 | #include <asm/iSeries/HvCallSc.h> | ||
28 | #include <asm/iSeries/HvTypes.h> | ||
29 | #include <asm/iSeries/ItLpNaca.h> | ||
30 | |||
31 | enum { | ||
32 | HvCallCfg_Cur = 0, | ||
33 | HvCallCfg_Init = 1, | ||
34 | HvCallCfg_Max = 2, | ||
35 | HvCallCfg_Min = 3 | ||
36 | }; | ||
37 | |||
38 | #define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6 | ||
39 | #define HvCallCfgGetPhysicalProcessors HvCallCfg + 7 | ||
40 | #define HvCallCfgGetMsChunks HvCallCfg + 9 | ||
41 | #define HvCallCfgGetSharedPoolIndex HvCallCfg + 20 | ||
42 | #define HvCallCfgGetSharedProcUnits HvCallCfg + 21 | ||
43 | #define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22 | ||
44 | #define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30 | ||
45 | #define HvCallCfgGetHostingLpIndex HvCallCfg + 32 | ||
46 | |||
47 | extern HvLpIndex HvLpConfig_getLpIndex_outline(void); | ||
48 | |||
49 | static inline HvLpIndex HvLpConfig_getLpIndex(void) | ||
50 | { | ||
51 | return itLpNaca.xLpIndex; | ||
52 | } | ||
53 | |||
54 | static inline HvLpIndex HvLpConfig_getPrimaryLpIndex(void) | ||
55 | { | ||
56 | return itLpNaca.xPrimaryLpIndex; | ||
57 | } | ||
58 | |||
59 | static inline u64 HvLpConfig_getMsChunks(void) | ||
60 | { | ||
61 | return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(), | ||
62 | HvCallCfg_Cur); | ||
63 | } | ||
64 | |||
65 | static inline u64 HvLpConfig_getSystemPhysicalProcessors(void) | ||
66 | { | ||
67 | return HvCall0(HvCallCfgGetSystemPhysicalProcessors); | ||
68 | } | ||
69 | |||
70 | static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI) | ||
71 | { | ||
72 | return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI); | ||
73 | } | ||
74 | |||
75 | static inline u64 HvLpConfig_getPhysicalProcessors(void) | ||
76 | { | ||
77 | return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(), | ||
78 | HvCallCfg_Cur); | ||
79 | } | ||
80 | |||
81 | static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void) | ||
82 | { | ||
83 | return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex()); | ||
84 | } | ||
85 | |||
86 | static inline u64 HvLpConfig_getSharedProcUnits(void) | ||
87 | { | ||
88 | return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(), | ||
89 | HvCallCfg_Cur); | ||
90 | } | ||
91 | |||
92 | static inline u64 HvLpConfig_getMaxSharedProcUnits(void) | ||
93 | { | ||
94 | return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(), | ||
95 | HvCallCfg_Max); | ||
96 | } | ||
97 | |||
98 | static inline u64 HvLpConfig_getMaxPhysicalProcessors(void) | ||
99 | { | ||
100 | return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(), | ||
101 | HvCallCfg_Max); | ||
102 | } | ||
103 | |||
104 | static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp( | ||
105 | HvLpIndex lp) | ||
106 | { | ||
107 | /* | ||
108 | * This is a new function in V5R1 so calls to this on older | ||
109 | * hypervisors will return -1 | ||
110 | */ | ||
111 | u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp); | ||
112 | if (retVal == -1) | ||
113 | retVal = 0; | ||
114 | return retVal; | ||
115 | } | ||
116 | |||
117 | static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void) | ||
118 | { | ||
119 | return HvLpConfig_getVirtualLanIndexMapForLp( | ||
120 | HvLpConfig_getLpIndex_outline()); | ||
121 | } | ||
122 | |||
123 | static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1, | ||
124 | HvLpIndex lp2) | ||
125 | { | ||
126 | HvLpVirtualLanIndexMap virtualLanIndexMap1 = | ||
127 | HvLpConfig_getVirtualLanIndexMapForLp(lp1); | ||
128 | HvLpVirtualLanIndexMap virtualLanIndexMap2 = | ||
129 | HvLpConfig_getVirtualLanIndexMapForLp(lp2); | ||
130 | return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0); | ||
131 | } | ||
132 | |||
133 | static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp) | ||
134 | { | ||
135 | return HvCall1(HvCallCfgGetHostingLpIndex, lp); | ||
136 | } | ||
137 | |||
138 | #endif /* _HVLPCONFIG_H */ | ||
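A hypothetical consumer of the accessors above: ask whether this partition shares a virtual LAN with the partition that hosts it (illustrative sketch only):

    static inline int example_can_reach_hosting_lp(void)
    {
            HvLpIndex me   = HvLpConfig_getLpIndex();
            HvLpIndex host = HvLpConfig_getHostingLpIndex(me);

            return HvLpConfig_doLpsCommunicateOnVirtualLan(me, host);
    }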
diff --git a/include/asm-ppc64/iSeries/HvLpEvent.h b/include/asm-ppc64/iSeries/HvLpEvent.h
deleted file mode 100644
index 865000de79b6..000000000000
--- a/include/asm-ppc64/iSeries/HvLpEvent.h
+++ /dev/null
@@ -1,142 +0,0 @@
1 | /* | ||
2 | * HvLpEvent.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | /* This file contains the class for HV events in the system. */ | ||
21 | |||
22 | #ifndef _HVLPEVENT_H | ||
23 | #define _HVLPEVENT_H | ||
24 | |||
25 | #include <asm/types.h> | ||
26 | #include <asm/ptrace.h> | ||
27 | #include <asm/iSeries/HvTypes.h> | ||
28 | #include <asm/iSeries/HvCallEvent.h> | ||
29 | |||
30 | /* | ||
31 | * HvLpEvent is the structure for Lp Event messages passed between | ||
32 | * partitions through PLIC. | ||
33 | */ | ||
34 | |||
35 | struct HvEventFlags { | ||
36 | u8 xValid:1; /* Indicates a valid request x00-x00 */ | ||
37 | u8 xRsvd1:4; /* Reserved ... */ | ||
38 | u8 xAckType:1; /* Immediate or deferred ... */ | ||
39 | u8 xAckInd:1; /* Indicates if ACK required ... */ | ||
40 | u8 xFunction:1; /* Interrupt or Acknowledge ... */ | ||
41 | }; | ||
42 | |||
43 | |||
44 | struct HvLpEvent { | ||
45 | struct HvEventFlags xFlags; /* Event flags x00-x00 */ | ||
46 | u8 xType; /* Type of message x01-x01 */ | ||
47 | u16 xSubtype; /* Subtype for event x02-x03 */ | ||
48 | u8 xSourceLp; /* Source LP x04-x04 */ | ||
49 | u8 xTargetLp; /* Target LP x05-x05 */ | ||
50 | u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */ | ||
51 | u8 xRc; /* RC for Ack flows x07-x07 */ | ||
52 | u16 xSourceInstanceId; /* Source side's instance id x08-x09 */ | ||
53 | u16 xTargetInstanceId; /* Target side's instance id x0A-x0B */ | ||
54 | union { | ||
55 | u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */ | ||
56 | u16 xSubtypeDataShort[2]; /* Data as 2 shorts */ | ||
57 | u8 xSubtypeDataChar[4]; /* Data as 4 chars */ | ||
58 | } x; | ||
59 | |||
60 | u64 xCorrelationToken; /* Unique value for source/type x10-x17 */ | ||
61 | }; | ||
62 | |||
63 | typedef void (*LpEventHandler)(struct HvLpEvent *, struct pt_regs *); | ||
64 | |||
65 | /* Register a handler for an event type - returns 0 on success */ | ||
66 | extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType, | ||
67 | LpEventHandler hdlr); | ||
68 | |||
69 | /* | ||
70 | * Unregister a handler for an event type | ||
71 | * | ||
72 | * This call will sleep until the handler being removed is guaranteed to | ||
73 | * be no longer executing on any CPU. Do not call with locks held. | ||
74 | * | ||
75 | * returns 0 on success | ||
76 | * Unregister will fail if there are any paths open for the type | ||
77 | */ | ||
78 | extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType); | ||
79 | |||
80 | /* | ||
81 | * Open an Lp Event Path for an event type | ||
82 | * returns 0 on success | ||
83 | * openPath will fail if there is no handler registered for the event type. | ||
84 | * The lpIndex specified is the partition index for the target partition | ||
85 | * (for VirtualIo, VirtualLan and SessionMgr; other types specify zero) | ||
86 | */ | ||
87 | extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex); | ||
88 | |||
89 | /* | ||
90 | * Close an Lp Event Path for a type and partition | ||
91 | * returns 0 on success | ||
92 | */ | ||
93 | extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex); | ||
94 | |||
95 | #define HvLpEvent_Type_Hypervisor 0 | ||
96 | #define HvLpEvent_Type_MachineFac 1 | ||
97 | #define HvLpEvent_Type_SessionMgr 2 | ||
98 | #define HvLpEvent_Type_SpdIo 3 | ||
99 | #define HvLpEvent_Type_VirtualBus 4 | ||
100 | #define HvLpEvent_Type_PciIo 5 | ||
101 | #define HvLpEvent_Type_RioIo 6 | ||
102 | #define HvLpEvent_Type_VirtualLan 7 | ||
103 | #define HvLpEvent_Type_VirtualIo 8 | ||
104 | #define HvLpEvent_Type_NumTypes 9 | ||
105 | |||
106 | #define HvLpEvent_Rc_Good 0 | ||
107 | #define HvLpEvent_Rc_BufferNotAvailable 1 | ||
108 | #define HvLpEvent_Rc_Cancelled 2 | ||
109 | #define HvLpEvent_Rc_GenericError 3 | ||
110 | #define HvLpEvent_Rc_InvalidAddress 4 | ||
111 | #define HvLpEvent_Rc_InvalidPartition 5 | ||
112 | #define HvLpEvent_Rc_InvalidSize 6 | ||
113 | #define HvLpEvent_Rc_InvalidSubtype 7 | ||
114 | #define HvLpEvent_Rc_InvalidSubtypeData 8 | ||
115 | #define HvLpEvent_Rc_InvalidType 9 | ||
116 | #define HvLpEvent_Rc_PartitionDead 10 | ||
117 | #define HvLpEvent_Rc_PathClosed 11 | ||
118 | #define HvLpEvent_Rc_SubtypeError 12 | ||
119 | |||
120 | #define HvLpEvent_Function_Ack 0 | ||
121 | #define HvLpEvent_Function_Int 1 | ||
122 | |||
123 | #define HvLpEvent_AckInd_NoAck 0 | ||
124 | #define HvLpEvent_AckInd_DoAck 1 | ||
125 | |||
126 | #define HvLpEvent_AckType_ImmediateAck 0 | ||
127 | #define HvLpEvent_AckType_DeferredAck 1 | ||
128 | |||
129 | #define HvLpDma_Direction_LocalToRemote 0 | ||
130 | #define HvLpDma_Direction_RemoteToLocal 1 | ||
131 | |||
132 | #define HvLpDma_AddressType_TceIndex 0 | ||
133 | #define HvLpDma_AddressType_RealAddress 1 | ||
134 | |||
135 | #define HvLpDma_Rc_Good 0 | ||
136 | #define HvLpDma_Rc_Error 1 | ||
137 | #define HvLpDma_Rc_PartitionDead 2 | ||
138 | #define HvLpDma_Rc_PathClosed 3 | ||
139 | #define HvLpDma_Rc_InvalidAddress 4 | ||
140 | #define HvLpDma_Rc_InvalidLength 5 | ||
141 | |||
142 | #endif /* _HVLPEVENT_H */ | ||
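Following the registration and open-path comments above, a hypothetical driver setup might look like this (illustrative only; handler body and error handling reduced to a skeleton):

    static void example_event_handler(struct HvLpEvent *event, struct pt_regs *regs)
    {
            /* Inspect event->xType / event->xSubtype here and, if
             * event->xFlags.xAckInd requests it, acknowledge the event. */
    }

    static int example_setup_virtual_io_events(HvLpIndex targetLp)
    {
            int rc;

            /* A handler must be registered before a path can be opened. */
            rc = HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
                                           example_event_handler);
            if (rc != 0)
                    return rc;

            /* For VirtualIo the lpIndex names the target partition. */
            return HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, targetLp);
    }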
diff --git a/include/asm-ppc64/iSeries/HvTypes.h b/include/asm-ppc64/iSeries/HvTypes.h
deleted file mode 100644
index b1ef2b4cb3e3..000000000000
--- a/include/asm-ppc64/iSeries/HvTypes.h
+++ /dev/null
@@ -1,113 +0,0 @@
1 | /* | ||
2 | * HvTypes.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _HVTYPES_H | ||
20 | #define _HVTYPES_H | ||
21 | |||
22 | /* | ||
23 | * General typedefs for the hypervisor. | ||
24 | */ | ||
25 | |||
26 | #include <asm/types.h> | ||
27 | |||
28 | typedef u8 HvLpIndex; | ||
29 | typedef u16 HvLpInstanceId; | ||
30 | typedef u64 HvLpTOD; | ||
31 | typedef u64 HvLpSystemSerialNum; | ||
32 | typedef u8 HvLpDeviceSerialNum[12]; | ||
33 | typedef u16 HvLpSanHwSet; | ||
34 | typedef u16 HvLpBus; | ||
35 | typedef u16 HvLpBoard; | ||
36 | typedef u16 HvLpCard; | ||
37 | typedef u8 HvLpDeviceType[4]; | ||
38 | typedef u8 HvLpDeviceModel[3]; | ||
39 | typedef u64 HvIoToken; | ||
40 | typedef u8 HvLpName[8]; | ||
41 | typedef u32 HvIoId; | ||
42 | typedef u64 HvRealMemoryIndex; | ||
43 | typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */ | ||
44 | typedef u16 HvLpVrmIndex; | ||
45 | typedef u32 HvXmGenerationId; | ||
46 | typedef u8 HvLpBusPool; | ||
47 | typedef u8 HvLpSharedPoolIndex; | ||
48 | typedef u16 HvLpSharedProcUnitsX100; | ||
49 | typedef u8 HvLpVirtualLanIndex; | ||
50 | typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */ | ||
51 | typedef u16 HvBusNumber; /* Hypervisor Bus Number */ | ||
52 | typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */ | ||
53 | typedef u8 HvAgentId; /* Hypervisor DevFn */ | ||
54 | |||
55 | |||
56 | #define HVMAXARCHITECTEDLPS 32 | ||
57 | #define HVMAXARCHITECTEDVIRTUALLANS 16 | ||
58 | #define HVMAXARCHITECTEDVIRTUALDISKS 32 | ||
59 | #define HVMAXARCHITECTEDVIRTUALCDROMS 8 | ||
60 | #define HVMAXARCHITECTEDVIRTUALTAPES 8 | ||
61 | #define HVCHUNKSIZE (256 * 1024) | ||
62 | #define HVPAGESIZE (4 * 1024) | ||
63 | #define HVLPMINMEGSPRIMARY 256 | ||
64 | #define HVLPMINMEGSSECONDARY 64 | ||
65 | #define HVCHUNKSPERMEG 4 | ||
66 | #define HVPAGESPERMEG 256 | ||
67 | #define HVPAGESPERCHUNK 64 | ||
68 | |||
69 | #define HvLpIndexInvalid ((HvLpIndex)0xff) | ||
70 | |||
71 | /* | ||
72 | * Enums for the sub-components under PLIC | ||
73 | * Used in HvCall and HvPrimaryCall | ||
74 | */ | ||
75 | enum { | ||
76 | HvCallCompId = 0, | ||
77 | HvCallCpuCtlsCompId = 1, | ||
78 | HvCallCfgCompId = 2, | ||
79 | HvCallEventCompId = 3, | ||
80 | HvCallHptCompId = 4, | ||
81 | HvCallPciCompId = 5, | ||
82 | HvCallSlmCompId = 6, | ||
83 | HvCallSmCompId = 7, | ||
84 | HvCallSpdCompId = 8, | ||
85 | HvCallXmCompId = 9, | ||
86 | HvCallRioCompId = 10, | ||
87 | HvCallRsvd3CompId = 11, | ||
88 | HvCallRsvd2CompId = 12, | ||
89 | HvCallRsvd1CompId = 13, | ||
90 | HvCallMaxCompId = 14, | ||
91 | HvPrimaryCallCompId = 0, | ||
92 | HvPrimaryCallCfgCompId = 1, | ||
93 | HvPrimaryCallPciCompId = 2, | ||
94 | HvPrimaryCallSmCompId = 3, | ||
95 | HvPrimaryCallSpdCompId = 4, | ||
96 | HvPrimaryCallXmCompId = 5, | ||
97 | HvPrimaryCallRioCompId = 6, | ||
98 | HvPrimaryCallRsvd7CompId = 7, | ||
99 | HvPrimaryCallRsvd6CompId = 8, | ||
100 | HvPrimaryCallRsvd5CompId = 9, | ||
101 | HvPrimaryCallRsvd4CompId = 10, | ||
102 | HvPrimaryCallRsvd3CompId = 11, | ||
103 | HvPrimaryCallRsvd2CompId = 12, | ||
104 | HvPrimaryCallRsvd1CompId = 13, | ||
105 | HvPrimaryCallMaxCompId = HvCallMaxCompId | ||
106 | }; | ||
107 | |||
108 | struct HvLpBufferList { | ||
109 | u64 addr; | ||
110 | u64 len; | ||
111 | }; | ||
112 | |||
113 | #endif /* _HVTYPES_H */ | ||
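
The bit-map typedefs in this header must be wide enough for the corresponding architected maxima: HvLpIndexMap is a u32 because HVMAXARCHITECTEDLPS is 32, and HvLpVirtualLanIndexMap is a u16 because HVMAXARCHITECTEDVIRTUALLANS is 16. The standalone C sketch below is not part of the kernel tree; it copies only those two typedefs and constants to show how the "must hold N bits" comments could be checked at compile time (assuming C11 _Static_assert), and the bit assignment used in the walk is purely illustrative, since the hypervisor defines its own bit ordering.

/* Standalone sketch: compile-time checks for the "must hold N bits" rules
 * documented in HvTypes.h. Types and constants are copied here purely for
 * illustration; the bit numbering below is not the hypervisor's. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t HvLpIndexMap;            /* must hold HVMAXARCHITECTEDLPS bits */
typedef uint16_t HvLpVirtualLanIndexMap;  /* must hold HVMAXARCHITECTEDVIRTUALLANS bits */

#define HVMAXARCHITECTEDLPS         32
#define HVMAXARCHITECTEDVIRTUALLANS 16

/* One bit per architected LP / virtual LAN must fit in the map type */
_Static_assert(sizeof(HvLpIndexMap) * 8 >= HVMAXARCHITECTEDLPS,
               "HvLpIndexMap too narrow");
_Static_assert(sizeof(HvLpVirtualLanIndexMap) * 8 >= HVMAXARCHITECTEDVIRTUALLANS,
               "HvLpVirtualLanIndexMap too narrow");

int main(void)
{
	/* Example: a map with LPs 0 and 5 present, then a simple walk */
	HvLpIndexMap map = (1u << 0) | (1u << 5);
	unsigned int lp;

	for (lp = 0; lp < HVMAXARCHITECTEDLPS; lp++)
		if (map & (1u << lp))
			printf("LP %u is present\n", lp);
	return 0;
}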
diff --git a/include/asm-ppc64/iSeries/ItExtVpdPanel.h b/include/asm-ppc64/iSeries/ItExtVpdPanel.h deleted file mode 100644 index 4c546a8802b4..000000000000 --- a/include/asm-ppc64/iSeries/ItExtVpdPanel.h +++ /dev/null | |||
@@ -1,52 +0,0 @@ | |||
1 | /* | ||
2 | * ItExtVpdPanel.h | ||
3 | * Copyright (C) 2002 Dave Boutcher IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _ITEXTVPDPANEL_H | ||
20 | #define _ITEXTVPDPANEL_H | ||
21 | |||
22 | /* | ||
23 | * This struct maps the panel information | ||
24 | * | ||
25 | * Warning: | ||
26 | * This data must match the architecture for the panel information | ||
27 | */ | ||
28 | |||
29 | #include <asm/types.h> | ||
30 | |||
31 | struct ItExtVpdPanel { | ||
32 | /* Definition of the Extended Vpd On Panel Data Area */ | ||
33 | char systemSerial[8]; | ||
34 | char mfgID[4]; | ||
35 | char reserved1[24]; | ||
36 | char machineType[4]; | ||
37 | char systemID[6]; | ||
38 | char somUniqueCnt[4]; | ||
39 | char serialNumberCount; | ||
40 | char reserved2[7]; | ||
41 | u16 bbu3; | ||
42 | u16 bbu2; | ||
43 | u16 bbu1; | ||
44 | char xLocationLabel[8]; | ||
45 | u8 xRsvd1[6]; | ||
46 | u16 xFrameId; | ||
47 | u8 xRsvd2[48]; | ||
48 | }; | ||
49 | |||
50 | extern struct ItExtVpdPanel xItExtVpdPanel; | ||
51 | |||
52 | #endif /* _ITEXTVPDPANEL_H */ | ||
diff --git a/include/asm-ppc64/iSeries/ItLpNaca.h b/include/asm-ppc64/iSeries/ItLpNaca.h deleted file mode 100644 index 225d0176779d..000000000000 --- a/include/asm-ppc64/iSeries/ItLpNaca.h +++ /dev/null | |||
@@ -1,80 +0,0 @@ | |||
1 | /* | ||
2 | * ItLpNaca.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _ITLPNACA_H | ||
20 | #define _ITLPNACA_H | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | |||
24 | /* | ||
25 | * This control block contains the data that is shared between the | ||
26 | * hypervisor (PLIC) and the OS. | ||
27 | */ | ||
28 | |||
29 | struct ItLpNaca { | ||
30 | // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data | ||
31 | u32 xDesc; // Eye catcher x00-x03 | ||
32 | u16 xSize; // Size of this class x04-x05 | ||
33 | u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07 | ||
34 | u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08 | ||
35 | u8 xPrimaryLpIndex; // LP Index of Primary x09-x09 | ||
36 | u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A | ||
37 | u8 xLpIndex; // LP Index x0B-x0B | ||
38 | u16 xMaxLpQueues; // Number of allocated queues x0C-x0D | ||
39 | u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F | ||
40 | u8 xPirEnvironMode:8; // Piranha or hardware x10-x10 | ||
41 | u8 xPirConsoleMode:8; // Piranha console indicator x11-x11 | ||
42 | u8 xPirDasdMode:8; // Piranha dasd indicator x12-x12 | ||
43 | u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17 | ||
44 | u8 xLparInstalled:1; // Is LPAR installed on system x18-x1F | ||
45 | u8 xSysPartitioned:1; // Is the system partitioned ... | ||
46 | u8 xHwSyncedTBs:1; // Hardware synced TBs ... | ||
47 | u8 xIntProcUtilHmt:1; // Utilize HMT for interrupts ... | ||
48 | u8 xRsvd1_1:4; // Reserved ... | ||
49 | u8 xSpVpdFormat:8; // VPD areas are in CSP format ... | ||
50 | u8 xIntProcRatio:8; // Ratio of int procs to procs ... | ||
51 | u8 xRsvd1_2[5]; // Reserved ... | ||
52 | u16 xRsvd1_3; // Reserved x20-x21 | ||
53 | u16 xPlicVrmIndex; // VRM index of PLIC x22-x23 | ||
54 | u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25 | ||
55 | u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27 | ||
56 | u64 xLoadAreaAddr; // ER address of load area x28-x2F | ||
57 | u32 xLoadAreaChunks; // Chunks for the load area x30-x33 | ||
58 | u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37 | ||
59 | // doing an ASR switch on PASE | ||
60 | // system call. | ||
61 | u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f | ||
62 | u8 xRsvd1_4[64]; // x40-x7F | ||
63 | |||
64 | // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data | ||
65 | u8 xRsvd2_0[128]; // Reserved x00-x7F | ||
66 | |||
67 | // CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators | ||
68 | // NB: Padding required to keep xInterruptHdlr at x300 which is required | ||
69 | // for v4r4 PLIC. | ||
70 | u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F | ||
71 | u8 xRsvd3_0[384]; // Reserved 180-2FF | ||
72 | |||
73 | // CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt | ||
74 | // handlers | ||
75 | u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF | ||
76 | }; | ||
77 | |||
78 | extern struct ItLpNaca itLpNaca; | ||
79 | |||
80 | #endif /* _ITLPNACA_H */ | ||
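
The NB comment above is the key layout constraint: the interrupt handler array has to stay at offset 0x300 for v4r4 PLIC, which is why the reserved/padding fields are sized the way they are. The sketch below is not the kernel's own structure or checking code; it collapses the first two 128-byte cache lines into opaque byte arrays just to show how the 0x300 requirement could be verified with offsetof.

/* Standalone sketch: the layout constraint from the comment above, with the
 * first two cache lines collapsed into opaque byte arrays. This illustrates
 * the 0x300 requirement only; it is not the kernel's ItLpNaca definition. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ItLpNacaLayout {
	uint8_t  cache_line_1[0x80];   /* 0x000 - 0x07F read-only data */
	uint8_t  cache_line_2[0x80];   /* 0x080 - 0x0FF local read-write data */
	uint8_t  xOldLpQueue[128];     /* 0x100 - 0x17F LP queue for v4r4 */
	uint8_t  xRsvd3_0[384];        /* 0x180 - 0x2FF padding */
	uint64_t xInterruptHdlr[32];   /* 0x300 - 0x3FF handler addresses */
};

_Static_assert(offsetof(struct ItLpNacaLayout, xInterruptHdlr) == 0x300,
               "interrupt handler array must stay at 0x300 for v4r4 PLIC");
_Static_assert(sizeof(struct ItLpNacaLayout) == 0x400,
               "control block spans eight 128-byte cache lines");

int main(void)
{
	printf("xInterruptHdlr offset: 0x%zx\n",
	       offsetof(struct ItLpNacaLayout, xInterruptHdlr));
	return 0;
}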
diff --git a/include/asm-ppc64/iSeries/ItLpQueue.h b/include/asm-ppc64/iSeries/ItLpQueue.h deleted file mode 100644 index 69b26ad74135..000000000000 --- a/include/asm-ppc64/iSeries/ItLpQueue.h +++ /dev/null | |||
@@ -1,81 +0,0 @@ | |||
1 | /* | ||
2 | * ItLpQueue.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _ITLPQUEUE_H | ||
20 | #define _ITLPQUEUE_H | ||
21 | |||
22 | /* | ||
23 | * This control block defines the simple LP queue structure that is | ||
24 | * shared between the hypervisor (PLIC) and the OS in order to send | ||
25 | * events to an LP. | ||
26 | */ | ||
27 | |||
28 | #include <asm/types.h> | ||
29 | #include <asm/ptrace.h> | ||
30 | |||
31 | struct HvLpEvent; | ||
32 | |||
33 | #define ITMaxLpQueues 8 | ||
34 | |||
35 | #define NotUsed 0 // Queue will not be used by PLIC | ||
36 | #define DedicatedIo 1 // Queue dedicated to IO processor specified | ||
37 | #define DedicatedLp 2 // Queue dedicated to LP specified | ||
38 | #define Shared 3 // Queue shared for both IO and LP | ||
39 | |||
40 | #define LpEventStackSize 4096 | ||
41 | #define LpEventMaxSize 256 | ||
42 | #define LpEventAlign 64 | ||
43 | |||
44 | struct hvlpevent_queue { | ||
45 | /* | ||
46 | * The xSlicCurEventPtr is the pointer to the next event stack entry | ||
47 | * that will become valid. The OS must peek at this entry to determine | ||
48 | * if it is valid. PLIC will set the valid indicator as the very last | ||
49 | * store into that entry. | ||
50 | * | ||
51 | * When the OS has completed processing of the event then it will mark | ||
52 | * the event as invalid so that PLIC knows it can store into that event | ||
53 | * location again. | ||
54 | * | ||
55 | * If the event stack fills and there are overflow events, then PLIC | ||
56 | * will set the xPlicOverflowIntPending flag in which case the OS will | ||
57 | * have to fetch the additional LP events once they have drained the | ||
58 | * event stack. | ||
59 | * | ||
60 | * The first 16-bytes are known by both the OS and PLIC. The remainder | ||
61 | * of the cache line is for use by the OS. | ||
62 | */ | ||
63 | u8 xPlicOverflowIntPending;// 0x00 Overflow events are pending | ||
64 | u8 xPlicStatus; // 0x01 DedicatedIo or DedicatedLp or NotUsed | ||
65 | u16 xSlicLogicalProcIndex; // 0x02 Logical Proc Index for correlation | ||
66 | u8 xPlicRsvd[12]; // 0x04 | ||
67 | char *xSlicCurEventPtr; // 0x10 | ||
68 | char *xSlicLastValidEventPtr; // 0x18 | ||
69 | char *xSlicEventStackPtr; // 0x20 | ||
70 | u8 xIndex; // 0x28 unique sequential index. | ||
71 | u8 xSlicRsvd[3]; // 0x29-2b | ||
72 | spinlock_t lock; | ||
73 | }; | ||
74 | |||
75 | extern struct hvlpevent_queue hvlpevent_queue; | ||
76 | |||
77 | extern int hvlpevent_is_pending(void); | ||
78 | extern void process_hvlpevents(struct pt_regs *); | ||
79 | extern void setup_hvlpevent_queue(void); | ||
80 | |||
81 | #endif /* _ITLPQUEUE_H */ | ||
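
The long comment inside hvlpevent_queue describes a simple handshake: the OS peeks at xSlicCurEventPtr, treats the entry as valid only once PLIC's final store has set the valid indicator, marks it invalid after processing so PLIC can reuse the slot, and falls back to fetching overflow events when xPlicOverflowIntPending is set. The self-contained sketch below mirrors only that consume loop; the event layout, the valid flag, and the helper names are invented for illustration and are not the kernel's hvlpevent code.

/* Standalone sketch of the valid-flag handshake described above.
 * The event format and helpers are illustrative, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

#define LP_EVENT_ALIGN    64
#define LP_EVENT_STACK_SZ 4096

struct fake_lp_event {
	volatile uint8_t valid;              /* set last by the producer */
	uint8_t          data[LP_EVENT_ALIGN - 1];
};

static uint8_t event_stack[LP_EVENT_STACK_SZ];
static uint8_t *cur_event = event_stack;

/* Consume events until the next slot has not been marked valid yet */
static void process_pending_events(void (*handle)(struct fake_lp_event *))
{
	struct fake_lp_event *ev;

	for (;;) {
		ev = (struct fake_lp_event *)cur_event;
		if (!ev->valid)
			break;                   /* producer has not finished this slot */

		handle(ev);

		ev->valid = 0;                   /* hand the slot back to the producer */
		cur_event += LP_EVENT_ALIGN;
		if (cur_event >= event_stack + LP_EVENT_STACK_SZ)
			cur_event = event_stack; /* wrap around the fixed-size stack */
	}
}

static void dump_event(struct fake_lp_event *ev)
{
	printf("event at %p\n", (void *)ev);
}

int main(void)
{
	/* Pretend the producer queued one event in the first slot */
	((struct fake_lp_event *)event_stack)->valid = 1;
	process_pending_events(dump_event);
	return 0;
}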
diff --git a/include/asm-ppc64/iSeries/ItLpRegSave.h b/include/asm-ppc64/iSeries/ItLpRegSave.h deleted file mode 100644 index 1b3087e76205..000000000000 --- a/include/asm-ppc64/iSeries/ItLpRegSave.h +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | /* | ||
2 | * ItLpRegSave.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _ITLPREGSAVE_H | ||
20 | #define _ITLPREGSAVE_H | ||
21 | |||
22 | /* | ||
23 | * This control block contains the data that is shared between PLIC | ||
24 | * and the OS | ||
25 | */ | ||
26 | |||
27 | struct ItLpRegSave { | ||
28 | u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003 | ||
29 | u16 xSize; // Size of this class 004-005 | ||
30 | u8 xInUse; // Area is live 006-007 | ||
31 | u8 xRsvd1[9]; // Reserved 007-00F | ||
32 | |||
33 | u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F | ||
34 | u32 xCTRL; // Control Register 170-173 | ||
35 | u32 xDEC; // Decrementer 174-177 | ||
36 | u32 xFPSCR; // FP Status and Control Reg 178-17B | ||
37 | u32 xPVR; // Processor Version Number 17C-17F | ||
38 | |||
39 | u64 xMMCR0; // Monitor Mode Control Reg 0 180-187 | ||
40 | u32 xPMC1; // Perf Monitor Counter 1 188-18B | ||
41 | u32 xPMC2; // Perf Monitor Counter 2 18C-18F | ||
42 | u32 xPMC3; // Perf Monitor Counter 3 190-193 | ||
43 | u32 xPMC4; // Perf Monitor Counter 4 194-197 | ||
44 | u32 xPIR; // Processor ID Reg 198-19B | ||
45 | |||
46 | u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F | ||
47 | u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3 | ||
48 | u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7 | ||
49 | u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB | ||
50 | u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF | ||
51 | u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3 | ||
52 | u32 xTSC; // Thread Switch Control 1B4-1B7 | ||
53 | u32 xTST; // Thread Switch Timeout 1B8-1BB | ||
54 | u32 xRsvd; // Reserved 1BC-1BF | ||
55 | |||
56 | u64 xACCR; // Address Compare Control Reg 1C0-1C7 | ||
57 | u64 xIMR; // Instruction Match Register 1C8-1CF | ||
58 | u64 xSDR1; // Storage Description Reg 1 1D0-1D7 | ||
59 | u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF | ||
60 | u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7 | ||
61 | u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF | ||
62 | u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7 | ||
63 | u64 xTB; // Time Base Register 1F8-1FF | ||
64 | |||
65 | u64 xFPR[32]; // Floating Point Registers 200-2FF | ||
66 | |||
67 | u64 xMSR; // Machine State Register 300-307 | ||
68 | u64 xNIA; // Next Instruction Address 308-30F | ||
69 | |||
70 | u64 xDABR; // Data Address Breakpoint Reg 310-317 | ||
71 | u64 xIABR; // Inst Address Breakpoint Reg 318-31F | ||
72 | |||
73 | u64 xHID0; // HW Implementation Dependent0 320-327 | ||
74 | |||
75 | u64 xHID4; // HW Implementation Dependent4 328-32F | ||
76 | u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337 | ||
77 | u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F | ||
78 | u64 xSDAR; // Sample Data Address Register 340-347 | ||
79 | u64 xSIAR; // Sample Inst Address Register 348-34F | ||
80 | |||
81 | u8 xRsvd3[176]; // Reserved 350-3FF | ||
82 | }; | ||
83 | |||
84 | #endif /* _ITLPREGSAVE_H */ | ||
diff --git a/include/asm-ppc64/iSeries/LparMap.h b/include/asm-ppc64/iSeries/LparMap.h deleted file mode 100644 index a6840b186d03..000000000000 --- a/include/asm-ppc64/iSeries/LparMap.h +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | /* | ||
2 | * LparMap.h | ||
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | #ifndef _LPARMAP_H | ||
20 | #define _LPARMAP_H | ||
21 | |||
22 | #ifndef __ASSEMBLY__ | ||
23 | |||
24 | #include <asm/types.h> | ||
25 | |||
26 | /* | ||
27 | * The iSeries hypervisor will set up mapping for one or more | ||
28 | * ESID/VSID pairs (in SLB/segment registers) and will set up | ||
29 | * mappings of one or more ranges of pages to VAs. | ||
30 | * We will have the hypervisor set up the ESID->VSID mapping | ||
31 | * for the four kernel segments (C-F). With shared processors, | ||
32 | * the hypervisor will clear all segment registers and reload | ||
33 | * these four whenever the processor is switched from one | ||
34 | * partition to another. | ||
35 | */ | ||
36 | |||
37 | /* The Vsid and Esid identified below will be used by the hypervisor | ||
38 | * to set up a memory mapping for part of the load area before giving | ||
39 | * control to the Linux kernel. The load area is 64 MB, but this must | ||
40 | * not attempt to map the whole load area. The Hashed Page Table may | ||
41 | * need to be located within the load area (if the total partition size | ||
42 | * is 64 MB), but cannot be mapped. Typically, this should specify | ||
43 | * to map half (32 MB) of the load area. | ||
44 | * | ||
45 | * The hypervisor will set up page table entries for the number of | ||
46 | * pages specified. | ||
47 | * | ||
48 | * In 32-bit mode, the hypervisor will load all four of the | ||
49 | * segment registers (identified by the low-order four bits of the | ||
50 | * Esid field. In 64-bit mode, the hypervisor will load one SLB | ||
51 | * entry to map the Esid to the Vsid. | ||
52 | */ | ||
53 | |||
54 | #define HvEsidsToMap 2 | ||
55 | #define HvRangesToMap 1 | ||
56 | |||
57 | /* Hypervisor initially maps 32MB of the load area */ | ||
58 | #define HvPagesToMap 8192 | ||
59 | |||
60 | struct LparMap { | ||
61 | u64 xNumberEsids; // Number of ESID/VSID pairs | ||
62 | u64 xNumberRanges; // Number of VA ranges to map | ||
63 | u64 xSegmentTableOffs; // Page number within load area of seg table | ||
64 | u64 xRsvd[5]; | ||
65 | struct { | ||
66 | u64 xKernelEsid; // Esid used to map kernel load | ||
67 | u64 xKernelVsid; // Vsid used to map kernel load | ||
68 | } xEsids[HvEsidsToMap]; | ||
69 | struct { | ||
70 | u64 xPages; // Number of pages to be mapped | ||
71 | u64 xOffset; // Offset from start of load area | ||
72 | u64 xVPN; // Virtual Page Number | ||
73 | } xRanges[HvRangesToMap]; | ||
74 | }; | ||
75 | |||
76 | extern const struct LparMap xLparMap; | ||
77 | |||
78 | #endif /* __ASSEMBLY__ */ | ||
79 | |||
80 | /* the fixed address where the LparMap exists */ | ||
81 | #define LPARMAP_PHYS 0x7000 | ||
82 | |||
83 | #endif /* _LPARMAP_H */ | ||
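
The "maps 32MB of the load area" comment and HvPagesToMap agree because the pages in question are the hypervisor's 4 KB pages: 8192 pages of 4 KB is 32 MB, i.e. half of the 64 MB load area. A tiny standalone check of that arithmetic (constant names copied from the headers above, program itself not part of the tree):

/* Standalone sketch: sanity-check the "32MB of the load area" comment.
 * 8192 pages of 4KB each is 32MB, half of the 64MB load area. */
#include <stdio.h>

#define HV_PAGE_SIZE    (4 * 1024)  /* HVPAGESIZE from HvTypes.h */
#define HV_PAGES_TO_MAP 8192        /* HvPagesToMap from LparMap.h */

int main(void)
{
	unsigned long bytes = (unsigned long)HV_PAGES_TO_MAP * HV_PAGE_SIZE;

	printf("initially mapped: %lu MB (load area is 64 MB)\n",
	       bytes >> 20);            /* prints 32 */
	return 0;
}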
diff --git a/include/asm-ppc64/iSeries/iSeries_io.h b/include/asm-ppc64/iSeries/iSeries_io.h deleted file mode 100644 index 9f79413342b3..000000000000 --- a/include/asm-ppc64/iSeries/iSeries_io.h +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | #ifndef _ISERIES_IO_H | ||
2 | #define _ISERIES_IO_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | |||
6 | #ifdef CONFIG_PPC_ISERIES | ||
7 | #include <linux/types.h> | ||
8 | /* | ||
9 | * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. | ||
10 | * | ||
11 | * Remaps the io.h for the iSeries Io | ||
12 | * Copyright (C) 2000 Allan H Trautman, IBM Corporation | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the: | ||
26 | * Free Software Foundation, Inc., | ||
27 | * 59 Temple Place, Suite 330, | ||
28 | * Boston, MA 02111-1307 USA | ||
29 | * | ||
30 | * Change Activity: | ||
31 | * Created December 28, 2000 | ||
32 | * End Change Activity | ||
33 | */ | ||
34 | |||
35 | extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress); | ||
36 | extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress); | ||
37 | extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress); | ||
38 | extern void iSeries_Write_Byte(u8 IoData, volatile void __iomem * IoAddress); | ||
39 | extern void iSeries_Write_Word(u16 IoData, volatile void __iomem * IoAddress); | ||
40 | extern void iSeries_Write_Long(u32 IoData, volatile void __iomem * IoAddress); | ||
41 | |||
42 | extern void iSeries_memset_io(volatile void __iomem *dest, char x, size_t n); | ||
43 | extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, | ||
44 | size_t n); | ||
45 | extern void iSeries_memcpy_fromio(void *dest, | ||
46 | const volatile void __iomem *source, size_t n); | ||
47 | |||
48 | #endif /* CONFIG_PPC_ISERIES */ | ||
49 | #endif /* _ISERIES_IO_H */ | ||
diff --git a/include/asm-ppc64/iSeries/mf.h b/include/asm-ppc64/iSeries/mf.h deleted file mode 100644 index 7e6a0d936999..000000000000 --- a/include/asm-ppc64/iSeries/mf.h +++ /dev/null | |||
@@ -1,57 +0,0 @@ | |||
1 | /* | ||
2 | * mf.h | ||
3 | * Copyright (C) 2001 Troy D. Armstrong IBM Corporation | ||
4 | * Copyright (C) 2004 Stephen Rothwell IBM Corporation | ||
5 | * | ||
6 | * This modules exists as an interface between a Linux secondary partition | ||
7 | * running on an iSeries and the primary partition's Virtual Service | ||
8 | * Processor (VSP) object. The VSP has final authority over powering on/off | ||
9 | * all partitions in the iSeries. It also provides miscellaneous low-level | ||
10 | * machine facility type operations. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
25 | */ | ||
26 | #ifndef _ASM_PPC64_ISERIES_MF_H | ||
27 | #define _ASM_PPC64_ISERIES_MF_H | ||
28 | |||
29 | #include <linux/types.h> | ||
30 | |||
31 | #include <asm/iSeries/HvTypes.h> | ||
32 | #include <asm/iSeries/HvCallEvent.h> | ||
33 | |||
34 | struct rtc_time; | ||
35 | |||
36 | typedef void (*MFCompleteHandler)(void *clientToken, int returnCode); | ||
37 | |||
38 | extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type, | ||
39 | unsigned size, unsigned amount, MFCompleteHandler hdlr, | ||
40 | void *userToken); | ||
41 | extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type, | ||
42 | unsigned count, MFCompleteHandler hdlr, void *userToken); | ||
43 | |||
44 | extern void mf_power_off(void); | ||
45 | extern void mf_reboot(void); | ||
46 | |||
47 | extern void mf_display_src(u32 word); | ||
48 | extern void mf_display_progress(u16 value); | ||
49 | extern void mf_clear_src(void); | ||
50 | |||
51 | extern void mf_init(void); | ||
52 | |||
53 | extern int mf_get_rtc(struct rtc_time *tm); | ||
54 | extern int mf_get_boot_rtc(struct rtc_time *tm); | ||
55 | extern int mf_set_rtc(struct rtc_time *tm); | ||
56 | |||
57 | #endif /* _ASM_PPC64_ISERIES_MF_H */ | ||
diff --git a/include/asm-ppc64/iSeries/vio.h b/include/asm-ppc64/iSeries/vio.h deleted file mode 100644 index 6c05e6257f53..000000000000 --- a/include/asm-ppc64/iSeries/vio.h +++ /dev/null | |||
@@ -1,130 +0,0 @@ | |||
1 | /* -*- linux-c -*- | ||
2 | * drivers/char/vio.h | ||
3 | * | ||
4 | * iSeries Virtual I/O Message Path header | ||
5 | * | ||
6 | * Authors: Dave Boutcher <boutcher@us.ibm.com> | ||
7 | * Ryan Arnold <ryanarn@us.ibm.com> | ||
8 | * Colin Devilbiss <devilbis@us.ibm.com> | ||
9 | * | ||
10 | * (C) Copyright 2000 IBM Corporation | ||
11 | * | ||
12 | * This header file is used by the iSeries virtual I/O device | ||
13 | * drivers. It defines the interfaces to the common functions | ||
14 | * (implemented in drivers/char/viopath.h) as well as defining | ||
15 | * common functions and structures. Currently (at the time I | ||
16 | * wrote this comment) the iSeries virtual I/O device drivers | ||
17 | * that use this are | ||
18 | * drivers/block/viodasd.c | ||
19 | * drivers/char/viocons.c | ||
20 | * drivers/char/viotape.c | ||
21 | * drivers/cdrom/viocd.c | ||
22 | * | ||
23 | * The iSeries virtual ethernet support (veth.c) uses a whole | ||
24 | * different set of functions. | ||
25 | * | ||
26 | * This program is free software; you can redistribute it and/or | ||
27 | * modify it under the terms of the GNU General Public License as | ||
28 | * published by the Free Software Foundation; either version 2 of the | ||
29 | * License, or (at your option) any later version. | ||
30 | * | ||
31 | * This program is distributed in the hope that it will be useful, but | ||
32 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
33 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
34 | * General Public License for more details. | ||
35 | * | ||
36 | * You should have received a copy of the GNU General Public License | ||
37 | * along with this program; if not, write to the Free Software Foundation, | ||
38 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
39 | * | ||
40 | */ | ||
41 | #ifndef _ISERIES_VIO_H | ||
42 | #define _ISERIES_VIO_H | ||
43 | |||
44 | #include <asm/iSeries/HvTypes.h> | ||
45 | #include <asm/iSeries/HvLpEvent.h> | ||
46 | |||
47 | /* | ||
48 | * iSeries virtual I/O events use the subtype field in | ||
49 | * HvLpEvent to figure out what kind of vio event is coming | ||
50 | * in. We use a table to route these, and this defines | ||
51 | * the maximum number of distinct subtypes | ||
52 | */ | ||
53 | #define VIO_MAX_SUBTYPES 8 | ||
54 | |||
55 | /* | ||
56 | * Each subtype can register a handler to process their events. | ||
57 | * The handler must have this interface. | ||
58 | */ | ||
59 | typedef void (vio_event_handler_t) (struct HvLpEvent * event); | ||
60 | |||
61 | extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq); | ||
62 | extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq); | ||
63 | extern int vio_setHandler(int subtype, vio_event_handler_t * beh); | ||
64 | extern int vio_clearHandler(int subtype); | ||
65 | extern int viopath_isactive(HvLpIndex lp); | ||
66 | extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp); | ||
67 | extern HvLpInstanceId viopath_targetinst(HvLpIndex lp); | ||
68 | extern void vio_set_hostlp(void); | ||
69 | extern void *vio_get_event_buffer(int subtype); | ||
70 | extern void vio_free_event_buffer(int subtype, void *buffer); | ||
71 | |||
72 | extern HvLpIndex viopath_hostLp; | ||
73 | extern HvLpIndex viopath_ourLp; | ||
74 | |||
75 | #define VIOCHAR_MAX_DATA 200 | ||
76 | |||
77 | #define VIOMAJOR_SUBTYPE_MASK 0xff00 | ||
78 | #define VIOMINOR_SUBTYPE_MASK 0x00ff | ||
79 | #define VIOMAJOR_SUBTYPE_SHIFT 8 | ||
80 | |||
81 | #define VIOVERSION 0x0101 | ||
82 | |||
83 | /* | ||
84 | * This is the general structure for VIO errors; each module should have | ||
85 | * a table of them, and each table should be terminated by an entry of | ||
86 | * { 0, 0, NULL }. Then, to find a specific error message, a module | ||
87 | * should pass its local table and the return code. | ||
88 | */ | ||
89 | struct vio_error_entry { | ||
90 | u16 rc; | ||
91 | int errno; | ||
92 | const char *msg; | ||
93 | }; | ||
94 | extern const struct vio_error_entry *vio_lookup_rc( | ||
95 | const struct vio_error_entry *local_table, u16 rc); | ||
96 | |||
97 | enum viosubtypes { | ||
98 | viomajorsubtype_monitor = 0x0100, | ||
99 | viomajorsubtype_blockio = 0x0200, | ||
100 | viomajorsubtype_chario = 0x0300, | ||
101 | viomajorsubtype_config = 0x0400, | ||
102 | viomajorsubtype_cdio = 0x0500, | ||
103 | viomajorsubtype_tape = 0x0600, | ||
104 | viomajorsubtype_scsi = 0x0700 | ||
105 | }; | ||
106 | |||
107 | enum vioconfigsubtype { | ||
108 | vioconfigget = 0x0001, | ||
109 | }; | ||
110 | |||
111 | enum viorc { | ||
112 | viorc_good = 0x0000, | ||
113 | viorc_noConnection = 0x0001, | ||
114 | viorc_noReceiver = 0x0002, | ||
115 | viorc_noBufferAvailable = 0x0003, | ||
116 | viorc_invalidMessageType = 0x0004, | ||
117 | viorc_invalidRange = 0x0201, | ||
118 | viorc_invalidToken = 0x0202, | ||
119 | viorc_DMAError = 0x0203, | ||
120 | viorc_useError = 0x0204, | ||
121 | viorc_releaseError = 0x0205, | ||
122 | viorc_invalidDisk = 0x0206, | ||
123 | viorc_openRejected = 0x0301 | ||
124 | }; | ||
125 | |||
126 | struct device; | ||
127 | |||
128 | extern struct device *iSeries_vio_dev; | ||
129 | |||
130 | #endif /* _ISERIES_VIO_H */ | ||
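
Two conventions in this header are easiest to see with a concrete example: the subtype word splits into a major part (which driver) and a minor part (driver-private), and each driver's vio_error_entry table must end with a { 0, 0, NULL } sentinel for vio_lookup_rc() to stop on. The standalone sketch below takes only the mask values and the sentinel convention from the header; the table contents and the lookup routine are illustrative stand-ins, and the errno field is renamed to err to avoid the libc errno macro in user space.

/* Standalone sketch: splitting a VIO event subtype and walking an error
 * table terminated by { 0, 0, NULL }, as described in the header above. */
#include <stdint.h>
#include <stdio.h>

#define VIOMAJOR_SUBTYPE_MASK  0xff00
#define VIOMINOR_SUBTYPE_MASK  0x00ff
#define VIOMAJOR_SUBTYPE_SHIFT 8

struct vio_error_entry {
	uint16_t    rc;
	int         err;        /* "errno" in the kernel header */
	const char *msg;
};

/* Illustrative table for a fictional driver; real drivers define their own */
static const struct vio_error_entry demo_errors[] = {
	{ 0x0001, 5 /* EIO */,     "no connection" },
	{ 0x0301, 13 /* EACCES */, "open rejected" },
	{ 0, 0, NULL }                      /* sentinel expected by the lookup */
};

static const struct vio_error_entry *demo_lookup_rc(
		const struct vio_error_entry *table, uint16_t rc)
{
	const struct vio_error_entry *e;

	for (e = table; e->msg != NULL; e++)
		if (e->rc == rc)
			return e;
	return NULL;                        /* the kernel falls back to a generic entry */
}

int main(void)
{
	uint16_t subtype = 0x0301;          /* chario major, minor 0x01 */
	const struct vio_error_entry *e = demo_lookup_rc(demo_errors, 0x0301);

	printf("major 0x%02x minor 0x%02x\n",
	       (subtype & VIOMAJOR_SUBTYPE_MASK) >> VIOMAJOR_SUBTYPE_SHIFT,
	       subtype & VIOMINOR_SUBTYPE_MASK);
	if (e)
		printf("rc 0x%04x -> %s\n", e->rc, e->msg);
	return 0;
}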
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h index bd7c9532d77b..77fc07c3c6bd 100644 --- a/include/asm-ppc64/io.h +++ b/include/asm-ppc64/io.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <asm/page.h> | 13 | #include <asm/page.h> |
14 | #include <asm/byteorder.h> | 14 | #include <asm/byteorder.h> |
15 | #ifdef CONFIG_PPC_ISERIES | 15 | #ifdef CONFIG_PPC_ISERIES |
16 | #include <asm/iSeries/iSeries_io.h> | 16 | #include <asm/iseries/iseries_io.h> |
17 | #endif | 17 | #endif |
18 | #include <asm/synch.h> | 18 | #include <asm/synch.h> |
19 | #include <asm/delay.h> | 19 | #include <asm/delay.h> |
diff --git a/include/asm-ppc64/ipcbuf.h b/include/asm-ppc64/ipcbuf.h deleted file mode 100644 index fa393c8342af..000000000000 --- a/include/asm-ppc64/ipcbuf.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #ifndef __PPC64_IPCBUF_H__ | ||
2 | #define __PPC64_IPCBUF_H__ | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for the PPC is identical to kern_ipc_perm | ||
6 | * as we have always had 32-bit UIDs and GIDs in the kernel. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | struct ipc64_perm | ||
15 | { | ||
16 | __kernel_key_t key; | ||
17 | __kernel_uid_t uid; | ||
18 | __kernel_gid_t gid; | ||
19 | __kernel_uid_t cuid; | ||
20 | __kernel_gid_t cgid; | ||
21 | __kernel_mode_t mode; | ||
22 | unsigned int seq; | ||
23 | unsigned int __pad1; | ||
24 | unsigned long __unused1; | ||
25 | unsigned long __unused2; | ||
26 | }; | ||
27 | |||
28 | #endif /* __PPC64_IPCBUF_H__ */ | ||
diff --git a/include/asm-ppc64/kexec.h b/include/asm-ppc64/kexec.h deleted file mode 100644 index 511908afaeeb..000000000000 --- a/include/asm-ppc64/kexec.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | #ifndef _PPC64_KEXEC_H | ||
2 | #define _PPC64_KEXEC_H | ||
3 | |||
4 | /* | ||
5 | * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. | ||
6 | * I.e. Maximum page that is mapped directly into kernel memory, | ||
7 | * and kmap is not required. | ||
8 | */ | ||
9 | |||
10 | /* Maximum physical address we can use pages from */ | ||
11 | /* XXX: since we copy virt we can use any page we allocate */ | ||
12 | #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) | ||
13 | |||
14 | /* Maximum address we can reach in physical address mode */ | ||
15 | /* XXX: I want to allow initrd in highmem. otherwise set to rmo on lpar */ | ||
16 | #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) | ||
17 | |||
18 | /* Maximum address we can use for the control code buffer */ | ||
19 | /* XXX: unused today, ppc32 uses TASK_SIZE, probably left over from use_mm */ | ||
20 | #define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) | ||
21 | |||
22 | /* XXX: today we don't use this at all, although we have a static stack */ | ||
23 | #define KEXEC_CONTROL_CODE_SIZE 4096 | ||
24 | |||
25 | /* The native architecture */ | ||
26 | #define KEXEC_ARCH KEXEC_ARCH_PPC64 | ||
27 | |||
28 | #define MAX_NOTE_BYTES 1024 | ||
29 | |||
30 | #ifndef __ASSEMBLY__ | ||
31 | |||
32 | typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; | ||
33 | |||
34 | extern note_buf_t crash_notes[]; | ||
35 | |||
36 | extern void kexec_smp_wait(void); /* get and clear naca physid, wait for | ||
37 | master to copy new code to 0 */ | ||
38 | |||
39 | #endif /* __ASSEMBLY__ */ | ||
40 | #endif /* _PPC_KEXEC_H */ | ||
41 | |||
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h index e0505acb77d9..4c18a5cb69f5 100644 --- a/include/asm-ppc64/mmu.h +++ b/include/asm-ppc64/mmu.h | |||
@@ -48,13 +48,21 @@ extern char initial_stab[]; | |||
48 | 48 | ||
49 | /* Bits in the SLB VSID word */ | 49 | /* Bits in the SLB VSID word */ |
50 | #define SLB_VSID_SHIFT 12 | 50 | #define SLB_VSID_SHIFT 12 |
51 | #define SLB_VSID_B ASM_CONST(0xc000000000000000) | ||
52 | #define SLB_VSID_B_256M ASM_CONST(0x0000000000000000) | ||
53 | #define SLB_VSID_B_1T ASM_CONST(0x4000000000000000) | ||
51 | #define SLB_VSID_KS ASM_CONST(0x0000000000000800) | 54 | #define SLB_VSID_KS ASM_CONST(0x0000000000000800) |
52 | #define SLB_VSID_KP ASM_CONST(0x0000000000000400) | 55 | #define SLB_VSID_KP ASM_CONST(0x0000000000000400) |
53 | #define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ | 56 | #define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ |
54 | #define SLB_VSID_L ASM_CONST(0x0000000000000100) /* largepage */ | 57 | #define SLB_VSID_L ASM_CONST(0x0000000000000100) |
55 | #define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ | 58 | #define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ |
56 | #define SLB_VSID_LS ASM_CONST(0x0000000000000070) /* size of largepage */ | 59 | #define SLB_VSID_LP ASM_CONST(0x0000000000000030) |
57 | 60 | #define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000) | |
61 | #define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010) | ||
62 | #define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020) | ||
63 | #define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030) | ||
64 | #define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP) | ||
65 | |||
58 | #define SLB_VSID_KERNEL (SLB_VSID_KP) | 66 | #define SLB_VSID_KERNEL (SLB_VSID_KP) |
59 | #define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) | 67 | #define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) |
60 | 68 | ||
@@ -69,6 +77,7 @@ extern char initial_stab[]; | |||
69 | #define HPTE_V_AVPN_SHIFT 7 | 77 | #define HPTE_V_AVPN_SHIFT 7 |
70 | #define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80) | 78 | #define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80) |
71 | #define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) | 79 | #define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) |
80 | #define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN)) | ||
72 | #define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) | 81 | #define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) |
73 | #define HPTE_V_LOCK ASM_CONST(0x0000000000000008) | 82 | #define HPTE_V_LOCK ASM_CONST(0x0000000000000008) |
74 | #define HPTE_V_LARGE ASM_CONST(0x0000000000000004) | 83 | #define HPTE_V_LARGE ASM_CONST(0x0000000000000004) |
@@ -81,6 +90,7 @@ extern char initial_stab[]; | |||
81 | #define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) | 90 | #define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) |
82 | #define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) | 91 | #define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) |
83 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) | 92 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) |
93 | #define HPTE_R_N ASM_CONST(0x0000000000000004) | ||
84 | 94 | ||
85 | /* Values for PP (assumes Ks=0, Kp=1) */ | 95 | /* Values for PP (assumes Ks=0, Kp=1) */ |
86 | /* pp0 will always be 0 for linux */ | 96 | /* pp0 will always be 0 for linux */ |
@@ -99,100 +109,120 @@ typedef struct { | |||
99 | extern hpte_t *htab_address; | 109 | extern hpte_t *htab_address; |
100 | extern unsigned long htab_hash_mask; | 110 | extern unsigned long htab_hash_mask; |
101 | 111 | ||
102 | static inline unsigned long hpt_hash(unsigned long vpn, int large) | 112 | /* |
113 | * Page size definition | ||
114 | * | ||
115 | * shift : is the "PAGE_SHIFT" value for that page size | ||
116 | * sllp : is a bit mask with the value of SLB L || LP to be or'ed | ||
117 | * directly to a slbmte "vsid" value | ||
118 | * penc : is the HPTE encoding mask for the "LP" field: | ||
119 | * | ||
120 | */ | ||
121 | struct mmu_psize_def | ||
103 | { | 122 | { |
104 | unsigned long vsid; | 123 | unsigned int shift; /* number of bits */ |
105 | unsigned long page; | 124 | unsigned int penc; /* HPTE encoding */ |
106 | 125 | unsigned int tlbiel; /* tlbiel supported for that page size */ | |
107 | if (large) { | 126 | unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ |
108 | vsid = vpn >> 4; | 127 | unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ |
109 | page = vpn & 0xf; | 128 | }; |
110 | } else { | ||
111 | vsid = vpn >> 16; | ||
112 | page = vpn & 0xffff; | ||
113 | } | ||
114 | 129 | ||
115 | return (vsid & 0x7fffffffffUL) ^ page; | 130 | #endif /* __ASSEMBLY__ */ |
116 | } | ||
117 | |||
118 | static inline void __tlbie(unsigned long va, int large) | ||
119 | { | ||
120 | /* clear top 16 bits, non SLS segment */ | ||
121 | va &= ~(0xffffULL << 48); | ||
122 | |||
123 | if (large) { | ||
124 | va &= HPAGE_MASK; | ||
125 | asm volatile("tlbie %0,1" : : "r"(va) : "memory"); | ||
126 | } else { | ||
127 | va &= PAGE_MASK; | ||
128 | asm volatile("tlbie %0,0" : : "r"(va) : "memory"); | ||
129 | } | ||
130 | } | ||
131 | 131 | ||
132 | static inline void tlbie(unsigned long va, int large) | 132 | /* |
133 | { | 133 | * The kernel uses the constants below to index into the page sizes array.
134 | asm volatile("ptesync": : :"memory"); | 134 | * The use of fixed constants for this purpose is better for the performance
135 | __tlbie(va, large); | 135 | * of the low level hash refill handlers. |
136 | asm volatile("eieio; tlbsync; ptesync": : :"memory"); | 136 | * |
137 | } | 137 | * A non supported page size has a "shift" field set to 0 |
138 | * | ||
139 | * Any new page size being implemented can get a new entry in here. Whether | ||
140 | * the kernel will use it or not is a different matter though. The actual page | ||
141 | * size used by hugetlbfs is not defined here and may be made variable | ||
142 | */ | ||
138 | 143 | ||
139 | static inline void __tlbiel(unsigned long va) | 144 | #define MMU_PAGE_4K 0 /* 4K */ |
140 | { | 145 | #define MMU_PAGE_64K 1 /* 64K */ |
141 | /* clear top 16 bits, non SLS segment */ | 146 | #define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */ |
142 | va &= ~(0xffffULL << 48); | 147 | #define MMU_PAGE_1M 3 /* 1M */ |
143 | va &= PAGE_MASK; | 148 | #define MMU_PAGE_16M 4 /* 16M */ |
144 | 149 | #define MMU_PAGE_16G 5 /* 16G */ | |
145 | /* | 150 | #define MMU_PAGE_COUNT 6 |
146 | * Thanks to Alan Modra we are now able to use machine specific | ||
147 | * assembly instructions (like tlbiel) by using the gas -many flag. | ||
148 | * However we have to support older toolchains so for the moment | ||
149 | * we hardwire it. | ||
150 | */ | ||
151 | #if 0 | ||
152 | asm volatile("tlbiel %0" : : "r"(va) : "memory"); | ||
153 | #else | ||
154 | asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory"); | ||
155 | #endif | ||
156 | } | ||
157 | 151 | ||
158 | static inline void tlbiel(unsigned long va) | 152 | #ifndef __ASSEMBLY__ |
159 | { | ||
160 | asm volatile("ptesync": : :"memory"); | ||
161 | __tlbiel(va); | ||
162 | asm volatile("ptesync": : :"memory"); | ||
163 | } | ||
164 | 153 | ||
165 | static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot) | 154 | /* |
166 | { | 155 | * The current system page sizes |
167 | unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v); | 156 | */ |
168 | unsigned long va; | 157 | extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; |
158 | extern int mmu_linear_psize; | ||
159 | extern int mmu_virtual_psize; | ||
169 | 160 | ||
170 | va = avpn << 23; | 161 | #ifdef CONFIG_HUGETLB_PAGE |
162 | /* | ||
163 | * The page size index of the huge pages for use by hugetlbfs | ||
164 | */ | ||
165 | extern int mmu_huge_psize; | ||
171 | 166 | ||
172 | if (! (hpte_v & HPTE_V_LARGE)) { | 167 | #endif /* CONFIG_HUGETLB_PAGE */ |
173 | unsigned long vpi, pteg; | ||
174 | 168 | ||
175 | pteg = slot / HPTES_PER_GROUP; | 169 | /* |
176 | if (hpte_v & HPTE_V_SECONDARY) | 170 | * This function sets the AVPN and L fields of the HPTE appropriately |
177 | pteg = ~pteg; | 171 | * for the page size |
172 | */ | ||
173 | static inline unsigned long hpte_encode_v(unsigned long va, int psize) | ||
174 | { | ||
175 | unsigned long v; | ||
176 | v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); | ||
177 | v <<= HPTE_V_AVPN_SHIFT; | ||
178 | if (psize != MMU_PAGE_4K) | ||
179 | v |= HPTE_V_LARGE; | ||
180 | return v; | ||
181 | } | ||
178 | 182 | ||
179 | vpi = ((va >> 28) ^ pteg) & htab_hash_mask; | 183 | /* |
184 | * This function sets the ARPN, and LP fields of the HPTE appropriately | ||
185 | * for the page size. We assume the pa is already "clean" that is properly | ||
186 | * aligned for the requested page size | ||
187 | */ | ||
188 | static inline unsigned long hpte_encode_r(unsigned long pa, int psize) | ||
189 | { | ||
190 | unsigned long r; | ||
180 | 191 | ||
181 | va |= vpi << PAGE_SHIFT; | 192 | /* A 4K page needs no special encoding */ |
193 | if (psize == MMU_PAGE_4K) | ||
194 | return pa & HPTE_R_RPN; | ||
195 | else { | ||
196 | unsigned int penc = mmu_psize_defs[psize].penc; | ||
197 | unsigned int shift = mmu_psize_defs[psize].shift; | ||
198 | return (pa & ~((1ul << shift) - 1)) | (penc << 12); | ||
182 | } | 199 | } |
183 | 200 | return r; | |
184 | return va; | ||
185 | } | 201 | } |
186 | 202 | ||
187 | /* | 203 | /* |
188 | * Handle a fault by adding an HPTE. If the address can't be determined | 204 | * This hashes a virtual address for a 256Mb segment only for now |
189 | * to be valid via Linux page tables, return 1. If handled return 0 | ||
190 | */ | 205 | */ |
191 | extern int __hash_page(unsigned long ea, unsigned long access, | 206 | |
192 | unsigned long vsid, pte_t *ptep, unsigned long trap, | 207 | static inline unsigned long hpt_hash(unsigned long va, unsigned int shift) |
193 | int local); | 208 | { |
209 | return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift); | ||
210 | } | ||
211 | |||
212 | extern int __hash_page_4K(unsigned long ea, unsigned long access, | ||
213 | unsigned long vsid, pte_t *ptep, unsigned long trap, | ||
214 | unsigned int local); | ||
215 | extern int __hash_page_64K(unsigned long ea, unsigned long access, | ||
216 | unsigned long vsid, pte_t *ptep, unsigned long trap, | ||
217 | unsigned int local); | ||
218 | struct mm_struct; | ||
219 | extern int hash_huge_page(struct mm_struct *mm, unsigned long access, | ||
220 | unsigned long ea, unsigned long vsid, int local); | ||
194 | 221 | ||
195 | extern void htab_finish_init(void); | 222 | extern void htab_finish_init(void); |
223 | extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | ||
224 | unsigned long pstart, unsigned long mode, | ||
225 | int psize); | ||
196 | 226 | ||
197 | extern void hpte_init_native(void); | 227 | extern void hpte_init_native(void); |
198 | extern void hpte_init_lpar(void); | 228 | extern void hpte_init_lpar(void); |
@@ -200,17 +230,21 @@ extern void hpte_init_iSeries(void); | |||
200 | 230 | ||
201 | extern long pSeries_lpar_hpte_insert(unsigned long hpte_group, | 231 | extern long pSeries_lpar_hpte_insert(unsigned long hpte_group, |
202 | unsigned long va, unsigned long prpn, | 232 | unsigned long va, unsigned long prpn, |
203 | unsigned long vflags, | 233 | unsigned long rflags, |
204 | unsigned long rflags); | 234 | unsigned long vflags, int psize); |
205 | extern long native_hpte_insert(unsigned long hpte_group, unsigned long va, | 235 | |
206 | unsigned long prpn, | 236 | extern long native_hpte_insert(unsigned long hpte_group, |
207 | unsigned long vflags, unsigned long rflags); | 237 | unsigned long va, unsigned long prpn, |
238 | unsigned long rflags, | ||
239 | unsigned long vflags, int psize); | ||
208 | 240 | ||
209 | extern long iSeries_hpte_bolt_or_insert(unsigned long hpte_group, | 241 | extern long iSeries_hpte_insert(unsigned long hpte_group, |
210 | unsigned long va, unsigned long prpn, | 242 | unsigned long va, unsigned long prpn, |
211 | unsigned long vflags, unsigned long rflags); | 243 | unsigned long rflags, |
244 | unsigned long vflags, int psize); | ||
212 | 245 | ||
213 | extern void stabs_alloc(void); | 246 | extern void stabs_alloc(void); |
247 | extern void slb_initialize(void); | ||
214 | 248 | ||
215 | #endif /* __ASSEMBLY__ */ | 249 | #endif /* __ASSEMBLY__ */ |
216 | 250 | ||
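
The new hpte_encode_v()/hpte_encode_r() helpers pack the page size into the two HPTE words: the V word carries the AVPN (va >> 23, with the per-size avpnm bits masked out) plus HPTE_V_LARGE for anything other than 4K, and the R word carries the real address with the per-size "penc" value or'd into the LP bits. The user-space sketch below mirrors that encoding for a 16M entry; the mmu_psize_defs values are illustrative placeholders, not the table the kernel actually fills in at boot.

/* Standalone sketch of the HPTE encoding helpers above, with a stub
 * page-size table. The penc/avpnm values are placeholders. */
#include <stdio.h>

#define HPTE_V_AVPN_SHIFT 7
#define HPTE_V_LARGE      0x0000000000000004ULL
#define HPTE_R_RPN        0x3ffffffffffff000ULL

#define MMU_PAGE_4K  0
#define MMU_PAGE_16M 4

struct mmu_psize_def_sketch {
	unsigned int  shift;   /* log2 of the page size */
	unsigned int  penc;    /* HPTE LP encoding */
	unsigned long avpnm;   /* AVPN bits to mask out */
};

/* Illustrative table: only the 4K and 16M slots are filled in */
static struct mmu_psize_def_sketch psize_defs[8] = {
	[MMU_PAGE_4K]  = { .shift = 12, .penc = 0, .avpnm = 0 },
	[MMU_PAGE_16M] = { .shift = 24, .penc = 0, .avpnm = 0x1UL },
};

static unsigned long encode_v(unsigned long va, int psize)
{
	unsigned long v = (va >> 23) & ~psize_defs[psize].avpnm;

	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;       /* any non-4K size is a "large" HPTE */
	return v;
}

static unsigned long encode_r(unsigned long pa, int psize)
{
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;  /* plain RPN, no LP encoding needed */
	return (pa & ~((1ul << psize_defs[psize].shift) - 1))
		| (psize_defs[psize].penc << 12);
}

int main(void)
{
	unsigned long va = 0xc000000001000000UL;   /* example virtual address */
	unsigned long pa = 0x0000000007000000UL;   /* 16M-aligned real address */

	printf("v = 0x%016lx\nr = 0x%016lx\n",
	       encode_v(va, MMU_PAGE_16M), encode_r(pa, MMU_PAGE_16M));
	return 0;
}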
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h index 77a743402db4..4f512e9fa6b8 100644 --- a/include/asm-ppc64/mmu_context.h +++ b/include/asm-ppc64/mmu_context.h | |||
@@ -17,22 +17,15 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Every architecture must define this function. It's the fastest | 20 | * Getting into a kernel thread, there is no valid user segment, mark |
21 | * way of searching a 140-bit bitmap where the first 100 bits are | 21 | * paca->pgdir NULL so that SLB miss on user addresses will fault |
22 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
23 | * bits is cleared. | ||
24 | */ | 22 | */ |
25 | static inline int sched_find_first_bit(unsigned long *b) | 23 | static inline void enter_lazy_tlb(struct mm_struct *mm, |
26 | { | 24 | struct task_struct *tsk) |
27 | if (unlikely(b[0])) | ||
28 | return __ffs(b[0]); | ||
29 | if (unlikely(b[1])) | ||
30 | return __ffs(b[1]) + 64; | ||
31 | return __ffs(b[2]) + 128; | ||
32 | } | ||
33 | |||
34 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
35 | { | 25 | { |
26 | #ifdef CONFIG_PPC_64K_PAGES | ||
27 | get_paca()->pgdir = NULL; | ||
28 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
36 | } | 29 | } |
37 | 30 | ||
38 | #define NO_CONTEXT 0 | 31 | #define NO_CONTEXT 0 |
@@ -55,8 +48,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
55 | cpu_set(smp_processor_id(), next->cpu_vm_mask); | 48 | cpu_set(smp_processor_id(), next->cpu_vm_mask); |
56 | 49 | ||
57 | /* No need to flush userspace segments if the mm doesn't change */ | 50 | /* No need to flush userspace segments if the mm doesn't change */
51 | #ifdef CONFIG_PPC_64K_PAGES | ||
52 | if (prev == next && get_paca()->pgdir == next->pgd) | ||
53 | return; | ||
54 | #else | ||
58 | if (prev == next) | 55 | if (prev == next) |
59 | return; | 56 | return; |
57 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
60 | 58 | ||
61 | #ifdef CONFIG_ALTIVEC | 59 | #ifdef CONFIG_ALTIVEC |
62 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | 60 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
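
The sched_find_first_bit() helper removed above is worth a concrete picture: its comment explains that the 140-bit priority bitmap lives in three unsigned longs and that the first 100 bits are usually clear, so the fastest scan simply checks the words in order. The sketch below is a user-space rendering of that idea only, using the GCC __builtin_ctzl builtin as a stand-in for the kernel's __ffs(); it is not the code the kernel now uses.

/* Standalone sketch of the three-word priority-bitmap scan described by the
 * removed sched_find_first_bit() comment: 140 bits in three unsigned longs,
 * scanned lowest word first. */
#include <stdio.h>

static inline int first_set_bit(unsigned long x)
{
	return __builtin_ctzl(x);            /* stand-in for the kernel's __ffs() */
}

static int find_first_prio_bit(const unsigned long b[3])
{
	if (b[0])
		return first_set_bit(b[0]);
	if (b[1])
		return first_set_bit(b[1]) + 64;
	return first_set_bit(b[2]) + 128;    /* caller guarantees some bit is set */
}

int main(void)
{
	unsigned long bitmap[3] = { 0, 0, 1UL << 8 };   /* only a high bit set */

	printf("first set bit: %d\n", find_first_prio_bit(bitmap));  /* prints 136 */
	return 0;
}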
diff --git a/include/asm-ppc64/naca.h b/include/asm-ppc64/naca.h deleted file mode 100644 index d2afe6447597..000000000000 --- a/include/asm-ppc64/naca.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | #ifndef _NACA_H | ||
2 | #define _NACA_H | ||
3 | |||
4 | /* | ||
5 | * c 2001 PPC 64 Team, IBM Corp | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <asm/types.h> | ||
14 | |||
15 | struct naca_struct { | ||
16 | /* Kernel only data - undefined for user space */ | ||
17 | void *xItVpdAreas; /* VPD Data 0x00 */ | ||
18 | void *xRamDisk; /* iSeries ramdisk 0x08 */ | ||
19 | u64 xRamDiskSize; /* In pages 0x10 */ | ||
20 | }; | ||
21 | |||
22 | extern struct naca_struct naca; | ||
23 | |||
24 | #endif /* _NACA_H */ | ||
diff --git a/include/asm-ppc64/numnodes.h b/include/asm-ppc64/numnodes.h deleted file mode 100644 index 75ae0b906708..000000000000 --- a/include/asm-ppc64/numnodes.h +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | #ifndef _ASM_MAX_NUMNODES_H | ||
2 | #define _ASM_MAX_NUMNODES_H | ||
3 | |||
4 | /* Max 16 Nodes */ | ||
5 | #define NODES_SHIFT 4 | ||
6 | |||
7 | #endif /* _ASM_MAX_NUMNODES_H */ | ||
diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h index dfaa21566c9a..def47d720d3d 100644 --- a/include/asm-ppc64/nvram.h +++ b/include/asm-ppc64/nvram.h | |||
@@ -70,7 +70,7 @@ extern struct nvram_partition *nvram_find_partition(int sig, const char *name); | |||
70 | 70 | ||
71 | extern int pSeries_nvram_init(void); | 71 | extern int pSeries_nvram_init(void); |
72 | extern int pmac_nvram_init(void); | 72 | extern int pmac_nvram_init(void); |
73 | extern int bpa_nvram_init(void); | 73 | extern int mmio_nvram_init(void); |
74 | 74 | ||
75 | /* PowerMac specific nvram stuffs */ | 75 | /* PowerMac specific nvram stuffs */ |
76 | 76 | ||
diff --git a/include/asm-ppc64/paca.h b/include/asm-ppc64/paca.h index 2f0f36f73d38..bccacd6aa93a 100644 --- a/include/asm-ppc64/paca.h +++ b/include/asm-ppc64/paca.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/config.h> | 19 | #include <linux/config.h> |
20 | #include <asm/types.h> | 20 | #include <asm/types.h> |
21 | #include <asm/lppaca.h> | 21 | #include <asm/lppaca.h> |
22 | #include <asm/iSeries/ItLpRegSave.h> | 22 | #include <asm/iseries/it_lp_reg_save.h> |
23 | #include <asm/mmu.h> | 23 | #include <asm/mmu.h> |
24 | 24 | ||
25 | register struct paca_struct *local_paca asm("r13"); | 25 | register struct paca_struct *local_paca asm("r13"); |
@@ -72,10 +72,15 @@ struct paca_struct { | |||
72 | /* | 72 | /* |
73 | * Now, starting in cacheline 2, the exception save areas | 73 | * Now, starting in cacheline 2, the exception save areas |
74 | */ | 74 | */ |
75 | u64 exgen[8] __attribute__((aligned(0x80))); /* used for most interrupts/exceptions */ | 75 | /* used for most interrupts/exceptions */ |
76 | u64 exmc[8]; /* used for machine checks */ | 76 | u64 exgen[10] __attribute__((aligned(0x80))); |
77 | u64 exslb[8]; /* used for SLB/segment table misses | 77 | u64 exmc[10]; /* used for machine checks */ |
78 | * on the linear mapping */ | 78 | u64 exslb[10]; /* used for SLB/segment table misses |
79 | * on the linear mapping */ | ||
80 | #ifdef CONFIG_PPC_64K_PAGES | ||
81 | pgd_t *pgdir; | ||
82 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
83 | |||
79 | mm_context_t context; | 84 | mm_context_t context; |
80 | u16 slb_cache[SLB_CACHE_ENTRIES]; | 85 | u16 slb_cache[SLB_CACHE_ENTRIES]; |
81 | u16 slb_cache_ptr; | 86 | u16 slb_cache_ptr; |
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h index d404431f0a9a..82ce187e5be8 100644 --- a/include/asm-ppc64/page.h +++ b/include/asm-ppc64/page.h | |||
@@ -13,32 +13,59 @@ | |||
13 | #include <linux/config.h> | 13 | #include <linux/config.h> |
14 | #include <asm/ppc_asm.h> /* for ASM_CONST */ | 14 | #include <asm/ppc_asm.h> /* for ASM_CONST */ |
15 | 15 | ||
16 | /* PAGE_SHIFT determines the page size */ | 16 | /* |
17 | #define PAGE_SHIFT 12 | 17 | * We support either 4k or 64k software page size. When using 64k pages |
18 | #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) | 18 | * however, whether we are really supporting 64k pages in HW or not is
19 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 19 | * irrelevant to those definitions. We always define HW_PAGE_SHIFT to 12 |
20 | * as the use of 64k pages remains Linux kernel specific: every notion of | ||
21 | * page number shared with the firmware, TCEs, iommu, etc... still assumes | ||
22 | * a page size of 4096. | ||
23 | */ | ||
24 | #ifdef CONFIG_PPC_64K_PAGES | ||
25 | #define PAGE_SHIFT 16 | ||
26 | #else | ||
27 | #define PAGE_SHIFT 12 | ||
28 | #endif | ||
20 | 29 | ||
21 | #define SID_SHIFT 28 | 30 | #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) |
22 | #define SID_MASK 0xfffffffffUL | 31 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
23 | #define ESID_MASK 0xfffffffff0000000UL | ||
24 | #define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK) | ||
25 | 32 | ||
26 | #define HPAGE_SHIFT 24 | 33 | /* HW_PAGE_SHIFT is always 4k pages */ |
27 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) | 34 | #define HW_PAGE_SHIFT 12 |
28 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | 35 | #define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT) |
36 | #define HW_PAGE_MASK (~(HW_PAGE_SIZE-1)) | ||
29 | 37 | ||
30 | #ifdef CONFIG_HUGETLB_PAGE | 38 | /* PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and |
39 | * HW_PAGE_SHIFT, that is 4k pages | ||
40 | */ | ||
41 | #define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT) | ||
42 | |||
43 | /* Segment size */ | ||
44 | #define SID_SHIFT 28 | ||
45 | #define SID_MASK 0xfffffffffUL | ||
46 | #define ESID_MASK 0xfffffffff0000000UL | ||
47 | #define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK) | ||
31 | 48 | ||
49 | /* Large pages size */ | ||
50 | |||
51 | #ifndef __ASSEMBLY__ | ||
52 | extern unsigned int HPAGE_SHIFT; | ||
53 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) | ||
54 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
32 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | 55 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
56 | #endif /* __ASSEMBLY__ */ | ||
57 | |||
58 | #ifdef CONFIG_HUGETLB_PAGE | ||
59 | |||
33 | 60 | ||
34 | #define HTLB_AREA_SHIFT 40 | 61 | #define HTLB_AREA_SHIFT 40 |
35 | #define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) | 62 | #define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) |
36 | #define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT) | 63 | #define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT) |
37 | 64 | ||
38 | #define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \ | 65 | #define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \ |
39 | - (1U << GET_ESID(addr))) & 0xffff) | 66 | - (1U << GET_ESID(addr))) & 0xffff) |
40 | #define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \ | 67 | #define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \ |
41 | - (1U << GET_HTLB_AREA(addr))) & 0xffff) | 68 | - (1U << GET_HTLB_AREA(addr))) & 0xffff) |
42 | 69 | ||
43 | #define ARCH_HAS_HUGEPAGE_ONLY_RANGE | 70 | #define ARCH_HAS_HUGEPAGE_ONLY_RANGE |
44 | #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE | 71 | #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE |
@@ -114,7 +141,25 @@ static __inline__ void clear_page(void *addr) | |||
114 | : "ctr", "memory"); | 141 | : "ctr", "memory"); |
115 | } | 142 | } |
116 | 143 | ||
117 | extern void copy_page(void *to, void *from); | 144 | extern void copy_4K_page(void *to, void *from); |
145 | |||
146 | #ifdef CONFIG_PPC_64K_PAGES | ||
147 | static inline void copy_page(void *to, void *from) | ||
148 | { | ||
149 | unsigned int i; | ||
150 | for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) { | ||
151 | copy_4K_page(to, from); | ||
152 | to += 4096; | ||
153 | from += 4096; | ||
154 | } | ||
155 | } | ||
156 | #else /* CONFIG_PPC_64K_PAGES */ | ||
157 | static inline void copy_page(void *to, void *from) | ||
158 | { | ||
159 | copy_4K_page(to, from); | ||
160 | } | ||
161 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
162 | |||
118 | struct page; | 163 | struct page; |
119 | extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); | 164 | extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); |
120 | extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); | 165 | extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); |
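
With CONFIG_PPC_64K_PAGES the new copy_page() above copies one 64K Linux page as 16 back-to-back 4K copies, because copy_4K_page() stays a 4K-sized routine and 1 << (PAGE_SHIFT - 12) is 1 << 4; PAGE_FACTOR captures that same 4-bit gap between PAGE_SHIFT and HW_PAGE_SHIFT. The standalone sketch below mirrors that subpage loop with memcpy() standing in for the copy_4K_page() assembly; it is an illustration, not the kernel routine.

/* Standalone sketch of the 64K copy_page() loop above, with memcpy()
 * standing in for the copy_4K_page() assembly routine. */
#include <string.h>
#include <stdio.h>

#define PAGE_SHIFT    16                  /* CONFIG_PPC_64K_PAGES case */
#define HW_PAGE_SHIFT 12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define HW_PAGE_SIZE  (1UL << HW_PAGE_SHIFT)
#define PAGE_FACTOR   (PAGE_SHIFT - HW_PAGE_SHIFT)

static void copy_4k_chunk(void *to, const void *from)
{
	memcpy(to, from, HW_PAGE_SIZE);
}

static void copy_page_sketch(void *to, const void *from)
{
	unsigned int i;

	/* 1 << PAGE_FACTOR == 16 hardware pages per 64K linux page */
	for (i = 0; i < (1u << PAGE_FACTOR); i++) {
		copy_4k_chunk(to, from);
		to   = (char *)to + HW_PAGE_SIZE;
		from = (const char *)from + HW_PAGE_SIZE;
	}
}

static char src[PAGE_SIZE], dst[PAGE_SIZE];

int main(void)
{
	src[PAGE_SIZE - 1] = 42;
	copy_page_sketch(dst, src);
	printf("last byte copied: %d\n", dst[PAGE_SIZE - 1]);   /* prints 42 */
	return 0;
}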
@@ -124,43 +169,75 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag | |||
124 | * These are used to make use of C type-checking. | 169 | * These are used to make use of C type-checking. |
125 | * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b. | 170 | * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b. |
126 | */ | 171 | */ |
127 | typedef struct { unsigned long pte; } pte_t; | ||
128 | typedef struct { unsigned long pmd; } pmd_t; | ||
129 | typedef struct { unsigned long pud; } pud_t; | ||
130 | typedef struct { unsigned long pgd; } pgd_t; | ||
131 | typedef struct { unsigned long pgprot; } pgprot_t; | ||
132 | 172 | ||
173 | /* PTE level */ | ||
174 | typedef struct { unsigned long pte; } pte_t; | ||
133 | #define pte_val(x) ((x).pte) | 175 | #define pte_val(x) ((x).pte) |
134 | #define pmd_val(x) ((x).pmd) | ||
135 | #define pud_val(x) ((x).pud) | ||
136 | #define pgd_val(x) ((x).pgd) | ||
137 | #define pgprot_val(x) ((x).pgprot) | ||
138 | |||
139 | #define __pte(x) ((pte_t) { (x) }) | 176 | #define __pte(x) ((pte_t) { (x) }) |
177 | |||
178 | /* 64k pages additionally define a bigger "real PTE" type that gathers | ||
179 | * the "second half" part of the PTE for pseudo 64k pages | ||
180 | */ | ||
181 | #ifdef CONFIG_PPC_64K_PAGES | ||
182 | typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; | ||
183 | #else | ||
184 | typedef struct { pte_t pte; } real_pte_t; | ||
185 | #endif | ||
186 | |||
187 | /* PMD level */ | ||
188 | typedef struct { unsigned long pmd; } pmd_t; | ||
189 | #define pmd_val(x) ((x).pmd) | ||
140 | #define __pmd(x) ((pmd_t) { (x) }) | 190 | #define __pmd(x) ((pmd_t) { (x) }) |
191 | |||
192 | /* PUD level exists only on 4k pages */ | ||
193 | #ifndef CONFIG_PPC_64K_PAGES | ||
194 | typedef struct { unsigned long pud; } pud_t; | ||
195 | #define pud_val(x) ((x).pud) | ||
141 | #define __pud(x) ((pud_t) { (x) }) | 196 | #define __pud(x) ((pud_t) { (x) }) |
197 | #endif | ||
198 | |||
199 | /* PGD level */ | ||
200 | typedef struct { unsigned long pgd; } pgd_t; | ||
201 | #define pgd_val(x) ((x).pgd) | ||
142 | #define __pgd(x) ((pgd_t) { (x) }) | 202 | #define __pgd(x) ((pgd_t) { (x) }) |
203 | |||
204 | /* Page protection bits */ | ||
205 | typedef struct { unsigned long pgprot; } pgprot_t; | ||
206 | #define pgprot_val(x) ((x).pgprot) | ||
143 | #define __pgprot(x) ((pgprot_t) { (x) }) | 207 | #define __pgprot(x) ((pgprot_t) { (x) }) |
144 | 208 | ||
145 | #else | 209 | #else |
210 | |||
146 | /* | 211 | /* |
147 | * .. while these make it easier on the compiler | 212 | * .. while these make it easier on the compiler |
148 | */ | 213 | */ |
149 | typedef unsigned long pte_t; | ||
150 | typedef unsigned long pmd_t; | ||
151 | typedef unsigned long pud_t; | ||
152 | typedef unsigned long pgd_t; | ||
153 | typedef unsigned long pgprot_t; | ||
154 | 214 | ||
215 | typedef unsigned long pte_t; | ||
155 | #define pte_val(x) (x) | 216 | #define pte_val(x) (x) |
217 | #define __pte(x) (x) | ||
218 | |||
219 | #ifdef CONFIG_PPC_64K_PAGES | ||
220 | typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; | ||
221 | #else | ||
222 | typedef unsigned long real_pte_t; | ||
223 | #endif | ||
224 | |||
225 | |||
226 | typedef unsigned long pmd_t; | ||
156 | #define pmd_val(x) (x) | 227 | #define pmd_val(x) (x) |
228 | #define __pmd(x) (x) | ||
229 | |||
230 | #ifndef CONFIG_PPC_64K_PAGES | ||
231 | typedef unsigned long pud_t; | ||
157 | #define pud_val(x) (x) | 232 | #define pud_val(x) (x) |
233 | #define __pud(x) (x) | ||
234 | #endif | ||
235 | |||
236 | typedef unsigned long pgd_t; | ||
158 | #define pgd_val(x) (x) | 237 | #define pgd_val(x) (x) |
159 | #define pgprot_val(x) (x) | 238 | #define pgprot_val(x) (x) |
160 | 239 | ||
161 | #define __pte(x) (x) | 240 | typedef unsigned long pgprot_t; |
162 | #define __pmd(x) (x) | ||
163 | #define __pud(x) (x) | ||
164 | #define __pgd(x) (x) | 241 | #define __pgd(x) (x) |
165 | #define __pgprot(x) (x) | 242 | #define __pgprot(x) (x) |
166 | 243 | ||
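A minimal user-space sketch of the copy_page() construction shown in the page.h hunk above, assuming PAGE_SHIFT is 16 (the CONFIG_PPC_64K_PAGES case) and substituting memcpy() for the hand-tuned copy_4K_page() assembly; the char * casts stand in for the kernel's void-pointer arithmetic, and main() only exercises the loop:

/* Stand-alone sketch, not kernel code: mimics the new copy_page() loop
 * with a memcpy()-based stand-in for copy_4K_page().  PAGE_SHIFT = 16
 * (64K base pages) is assumed here. */
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 16
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static void copy_4K_page(void *to, void *from)
{
        memcpy(to, from, 4096);         /* the kernel version is tuned asm */
}

static void copy_page(void *to, void *from)
{
        unsigned int i;

        /* 1 << (16 - 12) == 16 sub-copies of 4K each */
        for (i = 0; i < (1 << (PAGE_SHIFT - 12)); i++) {
                copy_4K_page(to, from);
                to = (char *)to + 4096;
                from = (char *)from + 4096;
        }
}

int main(void)
{
        static char src[PAGE_SIZE], dst[PAGE_SIZE];

        memset(src, 0xa5, sizeof(src));
        copy_page(dst, src);
        printf("pages match: %d\n", memcmp(dst, src, PAGE_SIZE) == 0);
        return 0;
}

With 4K base pages the loop degenerates to a single copy_4K_page() call, which is why the #else branch above is a plain wrapper.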
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h index 26bc49c1108d..98da0e4262bd 100644 --- a/include/asm-ppc64/pgalloc.h +++ b/include/asm-ppc64/pgalloc.h | |||
@@ -8,10 +8,16 @@ | |||
8 | 8 | ||
9 | extern kmem_cache_t *pgtable_cache[]; | 9 | extern kmem_cache_t *pgtable_cache[]; |
10 | 10 | ||
11 | #ifdef CONFIG_PPC_64K_PAGES | ||
12 | #define PTE_CACHE_NUM 0 | ||
13 | #define PMD_CACHE_NUM 0 | ||
14 | #define PGD_CACHE_NUM 1 | ||
15 | #else | ||
11 | #define PTE_CACHE_NUM 0 | 16 | #define PTE_CACHE_NUM 0 |
12 | #define PMD_CACHE_NUM 1 | 17 | #define PMD_CACHE_NUM 1 |
13 | #define PUD_CACHE_NUM 1 | 18 | #define PUD_CACHE_NUM 1 |
14 | #define PGD_CACHE_NUM 0 | 19 | #define PGD_CACHE_NUM 0 |
20 | #endif | ||
15 | 21 | ||
16 | /* | 22 | /* |
17 | * This program is free software; you can redistribute it and/or | 23 | * This program is free software; you can redistribute it and/or |
@@ -30,6 +36,8 @@ static inline void pgd_free(pgd_t *pgd) | |||
30 | kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd); | 36 | kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd); |
31 | } | 37 | } |
32 | 38 | ||
39 | #ifndef CONFIG_PPC_64K_PAGES | ||
40 | |||
33 | #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) | 41 | #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) |
34 | 42 | ||
35 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 43 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
@@ -43,7 +51,30 @@ static inline void pud_free(pud_t *pud) | |||
43 | kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud); | 51 | kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud); |
44 | } | 52 | } |
45 | 53 | ||
46 | #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) | 54 | static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) |
55 | { | ||
56 | pud_set(pud, (unsigned long)pmd); | ||
57 | } | ||
58 | |||
59 | #define pmd_populate(mm, pmd, pte_page) \ | ||
60 | pmd_populate_kernel(mm, pmd, page_address(pte_page)) | ||
61 | #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) | ||
62 | |||
63 | |||
64 | #else /* CONFIG_PPC_64K_PAGES */ | ||
65 | |||
66 | #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) | ||
67 | |||
68 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, | ||
69 | pte_t *pte) | ||
70 | { | ||
71 | pmd_set(pmd, (unsigned long)pte); | ||
72 | } | ||
73 | |||
74 | #define pmd_populate(mm, pmd, pte_page) \ | ||
75 | pmd_populate_kernel(mm, pmd, page_address(pte_page)) | ||
76 | |||
77 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
47 | 78 | ||
48 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 79 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
49 | { | 80 | { |
@@ -56,17 +87,15 @@ static inline void pmd_free(pmd_t *pmd) | |||
56 | kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd); | 87 | kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd); |
57 | } | 88 | } |
58 | 89 | ||
59 | #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte) | 90 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
60 | #define pmd_populate(mm, pmd, pte_page) \ | 91 | unsigned long address) |
61 | pmd_populate_kernel(mm, pmd, page_address(pte_page)) | ||
62 | |||
63 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
64 | { | 92 | { |
65 | return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], | 93 | return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], |
66 | GFP_KERNEL|__GFP_REPEAT); | 94 | GFP_KERNEL|__GFP_REPEAT); |
67 | } | 95 | } |
68 | 96 | ||
69 | static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | 97 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
98 | unsigned long address) | ||
70 | { | 99 | { |
71 | return virt_to_page(pte_alloc_one_kernel(mm, address)); | 100 | return virt_to_page(pte_alloc_one_kernel(mm, address)); |
72 | } | 101 | } |
@@ -103,7 +132,7 @@ static inline void pgtable_free(pgtable_free_t pgf) | |||
103 | kmem_cache_free(pgtable_cache[cachenum], p); | 132 | kmem_cache_free(pgtable_cache[cachenum], p); |
104 | } | 133 | } |
105 | 134 | ||
106 | void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); | 135 | extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); |
107 | 136 | ||
108 | #define __pte_free_tlb(tlb, ptepage) \ | 137 | #define __pte_free_tlb(tlb, ptepage) \ |
109 | pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ | 138 | pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ |
@@ -111,9 +140,11 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); | |||
111 | #define __pmd_free_tlb(tlb, pmd) \ | 140 | #define __pmd_free_tlb(tlb, pmd) \ |
112 | pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ | 141 | pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ |
113 | PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) | 142 | PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) |
143 | #ifndef CONFIG_PPC_64K_PAGES | ||
114 | #define __pud_free_tlb(tlb, pmd) \ | 144 | #define __pud_free_tlb(tlb, pmd) \ |
115 | pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ | 145 | pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ |
116 | PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) | 146 | PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) |
147 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
117 | 148 | ||
118 | #define check_pgt_cache() do { } while (0) | 149 | #define check_pgt_cache() do { } while (0) |
119 | 150 | ||
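The new populate helpers in the pgalloc.h hunk above all reduce to storing the address of the next-level table in the upper-level entry. A toy user-space model of that chaining, with simplified types and illustrative (assumed) table sizes, purely to show the shape of the calls:

/* Toy model, not kernel code: an upper-level entry simply holds the
 * address of the next-level table, as in the inlines above. */
#include <stdio.h>

typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef unsigned long pte_t;

static void pud_set(pud_t *pudp, unsigned long val) { pudp->pud = val; }
static void pmd_set(pmd_t *pmdp, unsigned long val) { pmdp->pmd = val; }

static void pud_populate(pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, (unsigned long)pmd);
}

static void pmd_populate_kernel(pmd_t *pmd, pte_t *pte)
{
        pmd_set(pmd, (unsigned long)pte);
}

int main(void)
{
        static pte_t pte_table[512];    /* sizes are illustrative only */
        static pmd_t pmd_table[128];
        static pud_t pud_entry;

        pmd_populate_kernel(&pmd_table[0], pte_table);
        pud_populate(&pud_entry, pmd_table);

        printf("pud -> %#lx, pmd[0] -> %#lx\n",
               pud_entry.pud, pmd_table[0].pmd);
        return 0;
}

The real helpers differ mainly in that the 4K configuration keeps a separate PUD level, while the 64K configuration folds the PUD away via pgtable-nopud.h.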
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-ppc64/pgtable-4k.h new file mode 100644 index 000000000000..e9590c06ad92 --- /dev/null +++ b/include/asm-ppc64/pgtable-4k.h | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Entries per page directory level. The PTE level must use a 64b record | ||
3 | * for each page table entry. The PMD and PGD level use a 32b record for | ||
4 | * each entry by assuming that each entry is page aligned. | ||
5 | */ | ||
6 | #define PTE_INDEX_SIZE 9 | ||
7 | #define PMD_INDEX_SIZE 7 | ||
8 | #define PUD_INDEX_SIZE 7 | ||
9 | #define PGD_INDEX_SIZE 9 | ||
10 | |||
11 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) | ||
12 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | ||
13 | #define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) | ||
14 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
15 | |||
16 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
17 | #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) | ||
18 | #define PTRS_PER_PUD (1 << PMD_INDEX_SIZE) | ||
19 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
20 | |||
21 | /* PMD_SHIFT determines what a second-level page table entry can map */ | ||
22 | #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) | ||
23 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
24 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
25 | |||
26 | /* With 4k base page size, hugepage PTEs go at the PMD level */ | ||
27 | #define MIN_HUGEPTE_SHIFT PMD_SHIFT | ||
28 | |||
29 | /* PUD_SHIFT determines what a third-level page table entry can map */ | ||
30 | #define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) | ||
31 | #define PUD_SIZE (1UL << PUD_SHIFT) | ||
32 | #define PUD_MASK (~(PUD_SIZE-1)) | ||
33 | |||
34 | /* PGDIR_SHIFT determines what a fourth-level page table entry can map */ | ||
35 | #define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) | ||
36 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
37 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
38 | |||
39 | /* PTE bits */ | ||
40 | #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ | ||
41 | #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ | ||
42 | #define _PAGE_F_SECOND _PAGE_SECONDARY | ||
43 | #define _PAGE_F_GIX _PAGE_GROUP_IX | ||
44 | |||
45 | /* PTE flags to conserve for HPTE identification */ | ||
46 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ | ||
47 | _PAGE_SECONDARY | _PAGE_GROUP_IX) | ||
48 | |||
49 | /* PAGE_MASK gives the right answer below, but only by accident */ | ||
50 | /* It should be preserving the high 48 bits and then specifically */ | ||
51 | /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */ | ||
52 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \ | ||
53 | _PAGE_HPTEFLAGS) | ||
54 | |||
55 | /* Bits to mask out from a PMD to get to the PTE page */ | ||
56 | #define PMD_MASKED_BITS 0 | ||
57 | /* Bits to mask out from a PUD to get to the PMD page */ | ||
58 | #define PUD_MASKED_BITS 0 | ||
59 | /* Bits to mask out from a PGD to get to the PUD page */ | ||
60 | #define PGD_MASKED_BITS 0 | ||
61 | |||
62 | /* shift to put page number into pte */ | ||
63 | #define PTE_RPN_SHIFT (17) | ||
64 | |||
65 | #define __real_pte(e,p) ((real_pte_t)(e)) | ||
66 | #define __rpte_to_pte(r) (r) | ||
67 | #define __rpte_to_hidx(r,index) (pte_val((r)) >> 12) | ||
68 | |||
69 | #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ | ||
70 | do { \ | ||
71 | index = 0; \ | ||
72 | shift = mmu_psize_defs[psize].shift; \ | ||
73 | |||
74 | #define pte_iterate_hashed_end() } while(0) | ||
75 | |||
76 | /* | ||
77 | * 4-level page tables related bits | ||
78 | */ | ||
79 | |||
80 | #define pgd_none(pgd) (!pgd_val(pgd)) | ||
81 | #define pgd_bad(pgd) (pgd_val(pgd) == 0) | ||
82 | #define pgd_present(pgd) (pgd_val(pgd) != 0) | ||
83 | #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) | ||
84 | #define pgd_page(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) | ||
85 | |||
86 | #define pud_offset(pgdp, addr) \ | ||
87 | (((pud_t *) pgd_page(*(pgdp))) + \ | ||
88 | (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) | ||
89 | |||
90 | #define pud_ERROR(e) \ | ||
91 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) | ||
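For reference, the 4K-base geometry defined above works out to PMD_SHIFT = 21 (2M per PMD entry), PUD_SHIFT = 28 (256M), PGDIR_SHIFT = 35 (32G) and 44 bits of virtual address space per page table, assuming PAGE_SHIFT = 12. A quick stand-alone check of that arithmetic:

/* Sanity check (user space, not kernel code) of the 4K-base page table
 * geometry above.  PAGE_SHIFT = 12 is assumed. */
#include <stdio.h>

int main(void)
{
        const int page_shift = 12;           /* assumed 4K base page */
        const int pte = 9, pmd = 7, pud = 7, pgd = 9;

        int pmd_shift   = page_shift + pte;  /* 21: each PMD entry maps 2M   */
        int pud_shift   = pmd_shift + pmd;   /* 28: each PUD entry maps 256M */
        int pgdir_shift = pud_shift + pud;   /* 35: each PGD entry maps 32G  */
        int total_bits  = pgdir_shift + pgd; /* 44 bits of virtual space     */

        printf("PMD_SHIFT=%d PUD_SHIFT=%d PGDIR_SHIFT=%d total=%d bits\n",
               pmd_shift, pud_shift, pgdir_shift, total_bits);
        return 0;
}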
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-ppc64/pgtable-64k.h new file mode 100644 index 000000000000..154f1840ece4 --- /dev/null +++ b/include/asm-ppc64/pgtable-64k.h | |||
@@ -0,0 +1,90 @@ | |||
1 | #include <asm-generic/pgtable-nopud.h> | ||
2 | |||
3 | |||
4 | #define PTE_INDEX_SIZE 12 | ||
5 | #define PMD_INDEX_SIZE 12 | ||
6 | #define PUD_INDEX_SIZE 0 | ||
7 | #define PGD_INDEX_SIZE 4 | ||
8 | |||
9 | #define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) | ||
10 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | ||
11 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
12 | |||
13 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
14 | #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) | ||
15 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
16 | |||
17 | /* With 64k base page size, hugepage PTEs go at the PTE level */ | ||
18 | #define MIN_HUGEPTE_SHIFT PAGE_SHIFT | ||
19 | |||
20 | /* PMD_SHIFT determines what a second-level page table entry can map */ | ||
21 | #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) | ||
22 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
23 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
24 | |||
25 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | ||
26 | #define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) | ||
27 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
28 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
29 | |||
30 | /* Additional PTE bits (don't change without checking asm in hash_low.S) */ | ||
31 | #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ | ||
32 | #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ | ||
33 | #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ | ||
34 | #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ | ||
35 | #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ | ||
36 | |||
37 | /* PTE flags to conserve for HPTE identification */ | ||
38 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\ | ||
39 | _PAGE_COMBO) | ||
40 | |||
41 | /* Shift to put page number into pte. | ||
42 | * | ||
43 | * That gives us a max RPN of 32 bits, which means a max of 48 bits | ||
44 | * of addressable physical space. | ||
45 | * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but | ||
46 | * 32 makes PTEs more readable for debugging for now :) | ||
47 | */ | ||
48 | #define PTE_RPN_SHIFT (32) | ||
49 | #define PTE_RPN_MAX (1UL << (64 - PTE_RPN_SHIFT)) | ||
50 | #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) | ||
51 | |||
52 | /* _PAGE_CHG_MASK is the mask of bits that are to be preserved across | ||
53 | * pgprot changes | ||
54 | */ | ||
55 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ | ||
56 | _PAGE_ACCESSED) | ||
57 | |||
58 | /* Bits to mask out from a PMD to get to the PTE page */ | ||
59 | #define PMD_MASKED_BITS 0x1ff | ||
60 | /* Bits to mask out from a PGD/PUD to get to the PMD page */ | ||
61 | #define PUD_MASKED_BITS 0x1ff | ||
62 | |||
63 | #ifndef __ASSEMBLY__ | ||
64 | |||
65 | /* Manipulate "rpte" values */ | ||
66 | #define __real_pte(e,p) ((real_pte_t) { \ | ||
67 | (e), pte_val(*((p) + PTRS_PER_PTE)) }) | ||
68 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ | ||
69 | (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) | ||
70 | #define __rpte_to_pte(r) ((r).pte) | ||
71 | #define __rpte_sub_valid(rpte, index) \ | ||
72 | (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) | ||
73 | |||
74 | |||
75 | /* Trick: we set __end to va + 64k, which happens to work for | ||
76 | * a 16M page as well, since we want only one iteration | ||
77 | */ | ||
78 | #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ | ||
79 | do { \ | ||
80 | unsigned long __end = va + PAGE_SIZE; \ | ||
81 | unsigned __split = (psize == MMU_PAGE_4K || \ | ||
82 | psize == MMU_PAGE_64K_AP); \ | ||
83 | shift = mmu_psize_defs[psize].shift; \ | ||
84 | for (index = 0; va < __end; index++, va += (1 << shift)) { \ | ||
85 | if (!__split || __rpte_sub_valid(rpte, index)) do { \ | ||
86 | |||
87 | #define pte_iterate_hashed_end() } while(0); } } while(0) | ||
88 | |||
89 | |||
90 | #endif /* __ASSEMBLY__ */ | ||
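With the 64K base page the same arithmetic gives PMD_SHIFT = 28 and PGDIR_SHIFT = 40 for the same 44-bit total, and PTE_RPN_SHIFT = 32 leaves 32 bits of RPN, i.e. the 48 bits of addressable physical space noted in the comment above. The sketch below (user space, with a made-up hidx value) shows how __rpte_to_hidx() unpacks one 4-bit HPTE slot per 4K subpage of a combo page:

/* Sketch, not kernel code: the hidx word of a combo 64K page packs one
 * 4-bit HPTE index per 4K subpage, matching __rpte_to_hidx() above.
 * The sample hidx value is made up for illustration. */
#include <stdio.h>

int main(void)
{
        unsigned long hidx = 0xfedcba9876543210UL;  /* 16 nibbles, one per subpage */
        int index;

        for (index = 0; index < 16; index++)
                printf("subpage %2d -> hidx %#lx\n",
                       index, (hidx >> (index << 2)) & 0xf);
        return 0;
}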
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h index 8c3f574046b6..a9783ba7fe98 100644 --- a/include/asm-ppc64/pgtable.h +++ b/include/asm-ppc64/pgtable.h | |||
@@ -13,42 +13,14 @@ | |||
13 | #include <asm/mmu.h> | 13 | #include <asm/mmu.h> |
14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
15 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | struct mm_struct; | ||
16 | #endif /* __ASSEMBLY__ */ | 17 | #endif /* __ASSEMBLY__ */ |
17 | 18 | ||
18 | /* | 19 | #ifdef CONFIG_PPC_64K_PAGES |
19 | * Entries per page directory level. The PTE level must use a 64b record | 20 | #include <asm/pgtable-64k.h> |
20 | * for each page table entry. The PMD and PGD level use a 32b record for | 21 | #else |
21 | * each entry by assuming that each entry is page aligned. | 22 | #include <asm/pgtable-4k.h> |
22 | */ | 23 | #endif |
23 | #define PTE_INDEX_SIZE 9 | ||
24 | #define PMD_INDEX_SIZE 7 | ||
25 | #define PUD_INDEX_SIZE 7 | ||
26 | #define PGD_INDEX_SIZE 9 | ||
27 | |||
28 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) | ||
29 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | ||
30 | #define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) | ||
31 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
32 | |||
33 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
34 | #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) | ||
35 | #define PTRS_PER_PUD (1 << PMD_INDEX_SIZE) | ||
36 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
37 | |||
38 | /* PMD_SHIFT determines what a second-level page table entry can map */ | ||
39 | #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) | ||
40 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
41 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
42 | |||
43 | /* PUD_SHIFT determines what a third-level page table entry can map */ | ||
44 | #define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) | ||
45 | #define PUD_SIZE (1UL << PUD_SHIFT) | ||
46 | #define PUD_MASK (~(PUD_SIZE-1)) | ||
47 | |||
48 | /* PGDIR_SHIFT determines what a fourth-level page table entry can map */ | ||
49 | #define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) | ||
50 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
51 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
52 | 24 | ||
53 | #define FIRST_USER_ADDRESS 0 | 25 | #define FIRST_USER_ADDRESS 0 |
54 | 26 | ||
@@ -75,8 +47,9 @@ | |||
75 | #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) | 47 | #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) |
76 | 48 | ||
77 | /* | 49 | /* |
78 | * Bits in a linux-style PTE. These match the bits in the | 50 | * Common bits in a linux-style PTE. These match the bits in the |
79 | * (hardware-defined) PowerPC PTE as closely as possible. | 51 | * (hardware-defined) PowerPC PTE as closely as possible. Additional |
52 | * bits may be defined in pgtable-*.h | ||
80 | */ | 53 | */ |
81 | #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ | 54 | #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ |
82 | #define _PAGE_USER 0x0002 /* matches one of the PP bits */ | 55 | #define _PAGE_USER 0x0002 /* matches one of the PP bits */ |
@@ -91,15 +64,6 @@ | |||
91 | #define _PAGE_RW 0x0200 /* software: user write access allowed */ | 64 | #define _PAGE_RW 0x0200 /* software: user write access allowed */ |
92 | #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ | 65 | #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ |
93 | #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ | 66 | #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ |
94 | #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ | ||
95 | #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ | ||
96 | #define _PAGE_HUGE 0x10000 /* 16MB page */ | ||
97 | /* Bits 0x7000 identify the index within an HPT Group */ | ||
98 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX) | ||
99 | /* PAGE_MASK gives the right answer below, but only by accident */ | ||
100 | /* It should be preserving the high 48 bits and then specifically */ | ||
101 | /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */ | ||
102 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS) | ||
103 | 67 | ||
104 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) | 68 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) |
105 | 69 | ||
@@ -122,10 +86,10 @@ | |||
122 | #define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) | 86 | #define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) |
123 | #define HAVE_PAGE_AGP | 87 | #define HAVE_PAGE_AGP |
124 | 88 | ||
125 | /* | 89 | /* PTEIDX nibble */ |
126 | * This bit in a hardware PTE indicates that the page is *not* executable. | 90 | #define _PTEIDX_SECONDARY 0x8 |
127 | */ | 91 | #define _PTEIDX_GROUP_IX 0x7 |
128 | #define HW_NO_EXEC _PAGE_EXEC | 92 | |
129 | 93 | ||
130 | /* | 94 | /* |
131 | * POWER4 and newer have per page execute protection, older chips can only | 95 | * POWER4 and newer have per page execute protection, older chips can only |
@@ -164,21 +128,10 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | |||
164 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | 128 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
165 | #endif /* __ASSEMBLY__ */ | 129 | #endif /* __ASSEMBLY__ */ |
166 | 130 | ||
167 | /* shift to put page number into pte */ | ||
168 | #define PTE_SHIFT (17) | ||
169 | |||
170 | #ifdef CONFIG_HUGETLB_PAGE | 131 | #ifdef CONFIG_HUGETLB_PAGE |
171 | 132 | ||
172 | #ifndef __ASSEMBLY__ | ||
173 | int hash_huge_page(struct mm_struct *mm, unsigned long access, | ||
174 | unsigned long ea, unsigned long vsid, int local); | ||
175 | #endif /* __ASSEMBLY__ */ | ||
176 | |||
177 | #define HAVE_ARCH_UNMAPPED_AREA | 133 | #define HAVE_ARCH_UNMAPPED_AREA |
178 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | 134 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
179 | #else | ||
180 | |||
181 | #define hash_huge_page(mm,a,ea,vsid,local) -1 | ||
182 | 135 | ||
183 | #endif | 136 | #endif |
184 | 137 | ||
@@ -197,7 +150,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | |||
197 | pte_t pte; | 150 | pte_t pte; |
198 | 151 | ||
199 | 152 | ||
200 | pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot); | 153 | pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot); |
201 | return pte; | 154 | return pte; |
202 | } | 155 | } |
203 | 156 | ||
@@ -209,30 +162,25 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | |||
209 | 162 | ||
210 | /* pte_clear moved to later in this file */ | 163 | /* pte_clear moved to later in this file */ |
211 | 164 | ||
212 | #define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT))) | 165 | #define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT))) |
213 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 166 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
214 | 167 | ||
215 | #define pmd_set(pmdp, ptep) ({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);}) | 168 | #define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) |
216 | #define pmd_none(pmd) (!pmd_val(pmd)) | 169 | #define pmd_none(pmd) (!pmd_val(pmd)) |
217 | #define pmd_bad(pmd) (pmd_val(pmd) == 0) | 170 | #define pmd_bad(pmd) (pmd_val(pmd) == 0) |
218 | #define pmd_present(pmd) (pmd_val(pmd) != 0) | 171 | #define pmd_present(pmd) (pmd_val(pmd) != 0) |
219 | #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) | 172 | #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) |
220 | #define pmd_page_kernel(pmd) (pmd_val(pmd)) | 173 | #define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) |
221 | #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) | 174 | #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) |
222 | 175 | ||
223 | #define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (unsigned long)(pmdp)) | 176 | #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) |
224 | #define pud_none(pud) (!pud_val(pud)) | 177 | #define pud_none(pud) (!pud_val(pud)) |
225 | #define pud_bad(pud) ((pud_val(pud)) == 0) | 178 | #define pud_bad(pud) ((pud_val(pud)) == 0) |
226 | #define pud_present(pud) (pud_val(pud) != 0) | 179 | #define pud_present(pud) (pud_val(pud) != 0) |
227 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0) | 180 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0) |
228 | #define pud_page(pud) (pud_val(pud)) | 181 | #define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS) |
229 | 182 | ||
230 | #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) | 183 | #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) |
231 | #define pgd_none(pgd) (!pgd_val(pgd)) | ||
232 | #define pgd_bad(pgd) (pgd_val(pgd) == 0) | ||
233 | #define pgd_present(pgd) (pgd_val(pgd) != 0) | ||
234 | #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) | ||
235 | #define pgd_page(pgd) (pgd_val(pgd)) | ||
236 | 184 | ||
237 | /* | 185 | /* |
238 | * Find an entry in a page-table-directory. We combine the address region | 186 | * Find an entry in a page-table-directory. We combine the address region |
@@ -243,9 +191,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | |||
243 | 191 | ||
244 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | 192 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
245 | 193 | ||
246 | #define pud_offset(pgdp, addr) \ | ||
247 | (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) | ||
248 | |||
249 | #define pmd_offset(pudp,addr) \ | 194 | #define pmd_offset(pudp,addr) \ |
250 | (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) | 195 | (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) |
251 | 196 | ||
@@ -271,7 +216,6 @@ static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;} | |||
271 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} | 216 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} |
272 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} | 217 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} |
273 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} | 218 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} |
274 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE;} | ||
275 | 219 | ||
276 | static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } | 220 | static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } |
277 | static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } | 221 | static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } |
@@ -286,7 +230,6 @@ static inline pte_t pte_mkclean(pte_t pte) { | |||
286 | pte_val(pte) &= ~(_PAGE_DIRTY); return pte; } | 230 | pte_val(pte) &= ~(_PAGE_DIRTY); return pte; } |
287 | static inline pte_t pte_mkold(pte_t pte) { | 231 | static inline pte_t pte_mkold(pte_t pte) { |
288 | pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | 232 | pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } |
289 | |||
290 | static inline pte_t pte_mkread(pte_t pte) { | 233 | static inline pte_t pte_mkread(pte_t pte) { |
291 | pte_val(pte) |= _PAGE_USER; return pte; } | 234 | pte_val(pte) |= _PAGE_USER; return pte; } |
292 | static inline pte_t pte_mkexec(pte_t pte) { | 235 | static inline pte_t pte_mkexec(pte_t pte) { |
@@ -298,7 +241,7 @@ static inline pte_t pte_mkdirty(pte_t pte) { | |||
298 | static inline pte_t pte_mkyoung(pte_t pte) { | 241 | static inline pte_t pte_mkyoung(pte_t pte) { |
299 | pte_val(pte) |= _PAGE_ACCESSED; return pte; } | 242 | pte_val(pte) |= _PAGE_ACCESSED; return pte; } |
300 | static inline pte_t pte_mkhuge(pte_t pte) { | 243 | static inline pte_t pte_mkhuge(pte_t pte) { |
301 | pte_val(pte) |= _PAGE_HUGE; return pte; } | 244 | return pte; } |
302 | 245 | ||
303 | /* Atomic PTE updates */ | 246 | /* Atomic PTE updates */ |
304 | static inline unsigned long pte_update(pte_t *p, unsigned long clr) | 247 | static inline unsigned long pte_update(pte_t *p, unsigned long clr) |
@@ -321,11 +264,13 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr) | |||
321 | /* PTE updating functions, this function puts the PTE in the | 264 | /* PTE updating functions, this function puts the PTE in the |
322 | * batch, doesn't actually trigger the hash flush immediately, | 265 | * batch, doesn't actually trigger the hash flush immediately, |
323 | * you need to call flush_tlb_pending() to do that. | 266 | * you need to call flush_tlb_pending() to do that. |
267 | * Pass -1 for "normal" size (4K or 64K) | ||
324 | */ | 268 | */ |
325 | extern void hpte_update(struct mm_struct *mm, unsigned long addr, unsigned long pte, | 269 | extern void hpte_update(struct mm_struct *mm, unsigned long addr, |
326 | int wrprot); | 270 | pte_t *ptep, unsigned long pte, int huge); |
327 | 271 | ||
328 | static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 272 | static inline int __ptep_test_and_clear_young(struct mm_struct *mm, |
273 | unsigned long addr, pte_t *ptep) | ||
329 | { | 274 | { |
330 | unsigned long old; | 275 | unsigned long old; |
331 | 276 | ||
@@ -333,7 +278,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned lon | |||
333 | return 0; | 278 | return 0; |
334 | old = pte_update(ptep, _PAGE_ACCESSED); | 279 | old = pte_update(ptep, _PAGE_ACCESSED); |
335 | if (old & _PAGE_HASHPTE) { | 280 | if (old & _PAGE_HASHPTE) { |
336 | hpte_update(mm, addr, old, 0); | 281 | hpte_update(mm, addr, ptep, old, 0); |
337 | flush_tlb_pending(); | 282 | flush_tlb_pending(); |
338 | } | 283 | } |
339 | return (old & _PAGE_ACCESSED) != 0; | 284 | return (old & _PAGE_ACCESSED) != 0; |
@@ -351,7 +296,8 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned lon | |||
351 | * moment we always flush but we need to fix hpte_update and test if the | 296 | * moment we always flush but we need to fix hpte_update and test if the |
352 | * optimisation is worth it. | 297 | * optimisation is worth it. |
353 | */ | 298 | */ |
354 | static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 299 | static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, |
300 | unsigned long addr, pte_t *ptep) | ||
355 | { | 301 | { |
356 | unsigned long old; | 302 | unsigned long old; |
357 | 303 | ||
@@ -359,7 +305,7 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned lon | |||
359 | return 0; | 305 | return 0; |
360 | old = pte_update(ptep, _PAGE_DIRTY); | 306 | old = pte_update(ptep, _PAGE_DIRTY); |
361 | if (old & _PAGE_HASHPTE) | 307 | if (old & _PAGE_HASHPTE) |
362 | hpte_update(mm, addr, old, 0); | 308 | hpte_update(mm, addr, ptep, old, 0); |
363 | return (old & _PAGE_DIRTY) != 0; | 309 | return (old & _PAGE_DIRTY) != 0; |
364 | } | 310 | } |
365 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | 311 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY |
@@ -371,7 +317,8 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned lon | |||
371 | }) | 317 | }) |
372 | 318 | ||
373 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 319 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
374 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 320 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, |
321 | pte_t *ptep) | ||
375 | { | 322 | { |
376 | unsigned long old; | 323 | unsigned long old; |
377 | 324 | ||
@@ -379,7 +326,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
379 | return; | 326 | return; |
380 | old = pte_update(ptep, _PAGE_RW); | 327 | old = pte_update(ptep, _PAGE_RW); |
381 | if (old & _PAGE_HASHPTE) | 328 | if (old & _PAGE_HASHPTE) |
382 | hpte_update(mm, addr, old, 0); | 329 | hpte_update(mm, addr, ptep, old, 0); |
383 | } | 330 | } |
384 | 331 | ||
385 | /* | 332 | /* |
@@ -408,21 +355,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
408 | }) | 355 | }) |
409 | 356 | ||
410 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 357 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
411 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 358 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
359 | unsigned long addr, pte_t *ptep) | ||
412 | { | 360 | { |
413 | unsigned long old = pte_update(ptep, ~0UL); | 361 | unsigned long old = pte_update(ptep, ~0UL); |
414 | 362 | ||
415 | if (old & _PAGE_HASHPTE) | 363 | if (old & _PAGE_HASHPTE) |
416 | hpte_update(mm, addr, old, 0); | 364 | hpte_update(mm, addr, ptep, old, 0); |
417 | return __pte(old); | 365 | return __pte(old); |
418 | } | 366 | } |
419 | 367 | ||
420 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep) | 368 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
369 | pte_t * ptep) | ||
421 | { | 370 | { |
422 | unsigned long old = pte_update(ptep, ~0UL); | 371 | unsigned long old = pte_update(ptep, ~0UL); |
423 | 372 | ||
424 | if (old & _PAGE_HASHPTE) | 373 | if (old & _PAGE_HASHPTE) |
425 | hpte_update(mm, addr, old, 0); | 374 | hpte_update(mm, addr, ptep, old, 0); |
426 | } | 375 | } |
427 | 376 | ||
428 | /* | 377 | /* |
@@ -435,7 +384,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
435 | pte_clear(mm, addr, ptep); | 384 | pte_clear(mm, addr, ptep); |
436 | flush_tlb_pending(); | 385 | flush_tlb_pending(); |
437 | } | 386 | } |
438 | *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); | 387 | pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); |
388 | |||
389 | #ifdef CONFIG_PPC_64K_PAGES | ||
390 | if (mmu_virtual_psize != MMU_PAGE_64K) | ||
391 | pte = __pte(pte_val(pte) | _PAGE_COMBO); | ||
392 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
393 | |||
394 | *ptep = pte; | ||
439 | } | 395 | } |
440 | 396 | ||
441 | /* Set the dirty and/or accessed bits atomically in a linux PTE, this | 397 | /* Set the dirty and/or accessed bits atomically in a linux PTE, this |
@@ -482,8 +438,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |||
482 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | 438 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) |
483 | #define pmd_ERROR(e) \ | 439 | #define pmd_ERROR(e) \ |
484 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) | 440 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) |
485 | #define pud_ERROR(e) \ | ||
486 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) | ||
487 | #define pgd_ERROR(e) \ | 441 | #define pgd_ERROR(e) \ |
488 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | 442 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) |
489 | 443 | ||
@@ -509,12 +463,12 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | |||
509 | /* Encode and de-code a swap entry */ | 463 | /* Encode and de-code a swap entry */ |
510 | #define __swp_type(entry) (((entry).val >> 1) & 0x3f) | 464 | #define __swp_type(entry) (((entry).val >> 1) & 0x3f) |
511 | #define __swp_offset(entry) ((entry).val >> 8) | 465 | #define __swp_offset(entry) ((entry).val >> 8) |
512 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | 466 | #define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)}) |
513 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> PTE_SHIFT }) | 467 | #define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT}) |
514 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_SHIFT }) | 468 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT }) |
515 | #define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT) | 469 | #define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT) |
516 | #define pgoff_to_pte(off) ((pte_t) {((off) << PTE_SHIFT)|_PAGE_FILE}) | 470 | #define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE}) |
517 | #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT) | 471 | #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT) |
518 | 472 | ||
519 | /* | 473 | /* |
520 | * kern_addr_valid is intended to indicate whether an address is a valid | 474 | * kern_addr_valid is intended to indicate whether an address is a valid |
@@ -532,29 +486,22 @@ void pgtable_cache_init(void); | |||
532 | /* | 486 | /* |
533 | * find_linux_pte returns the address of a linux pte for a given | 487 | * find_linux_pte returns the address of a linux pte for a given |
534 | * effective address and directory. If not found, it returns zero. | 488 | * effective address and directory. If not found, it returns zero. |
535 | */ | 489 | */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) |
536 | static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) | ||
537 | { | 490 | { |
538 | pgd_t *pg; | 491 | pgd_t *pg; |
539 | pud_t *pu; | 492 | pud_t *pu; |
540 | pmd_t *pm; | 493 | pmd_t *pm; |
541 | pte_t *pt = NULL; | 494 | pte_t *pt = NULL; |
542 | pte_t pte; | ||
543 | 495 | ||
544 | pg = pgdir + pgd_index(ea); | 496 | pg = pgdir + pgd_index(ea); |
545 | if (!pgd_none(*pg)) { | 497 | if (!pgd_none(*pg)) { |
546 | pu = pud_offset(pg, ea); | 498 | pu = pud_offset(pg, ea); |
547 | if (!pud_none(*pu)) { | 499 | if (!pud_none(*pu)) { |
548 | pm = pmd_offset(pu, ea); | 500 | pm = pmd_offset(pu, ea); |
549 | if (pmd_present(*pm)) { | 501 | if (pmd_present(*pm)) |
550 | pt = pte_offset_kernel(pm, ea); | 502 | pt = pte_offset_kernel(pm, ea); |
551 | pte = *pt; | ||
552 | if (!pte_present(pte)) | ||
553 | pt = NULL; | ||
554 | } | ||
555 | } | 503 | } |
556 | } | 504 | } |
557 | |||
558 | return pt; | 505 | return pt; |
559 | } | 506 | } |
560 | 507 | ||
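A stand-alone sketch of the pfn/pte round trip implied by the PTE_RPN_SHIFT-based pfn_pte() and pte_pfn() above; PTE_RPN_SHIFT = 32 is the 64K-page value, a 64-bit unsigned long is assumed, and the protection bits are sample values only:

/* Sketch, not kernel code: pack a pfn above PTE_RPN_SHIFT, protection
 * bits below it, and recover the pfn again. */
#include <stdio.h>

#define PTE_RPN_SHIFT 32
#define _PAGE_PRESENT 0x0001UL
#define _PAGE_RW      0x0200UL

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x)   ((pte_t) { (x) })

static pte_t pfn_pte(unsigned long pfn, unsigned long pgprot)
{
        return __pte((pfn << PTE_RPN_SHIFT) | pgprot);
}

static unsigned long pte_pfn(pte_t pte)
{
        return pte_val(pte) >> PTE_RPN_SHIFT;
}

int main(void)
{
        pte_t pte = pfn_pte(0x12345UL, _PAGE_PRESENT | _PAGE_RW);

        printf("pte=%#lx pfn=%#lx\n", pte_val(pte), pte_pfn(pte));
        return 0;
}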
diff --git a/include/asm-ppc64/plpar_wrappers.h b/include/asm-ppc64/plpar_wrappers.h deleted file mode 100644 index 72dd2449ee76..000000000000 --- a/include/asm-ppc64/plpar_wrappers.h +++ /dev/null | |||
@@ -1,120 +0,0 @@ | |||
1 | #ifndef _PPC64_PLPAR_WRAPPERS_H | ||
2 | #define _PPC64_PLPAR_WRAPPERS_H | ||
3 | |||
4 | #include <asm/hvcall.h> | ||
5 | |||
6 | static inline long poll_pending(void) | ||
7 | { | ||
8 | unsigned long dummy; | ||
9 | return plpar_hcall(H_POLL_PENDING, 0, 0, 0, 0, | ||
10 | &dummy, &dummy, &dummy); | ||
11 | } | ||
12 | |||
13 | static inline long prod_processor(void) | ||
14 | { | ||
15 | plpar_hcall_norets(H_PROD); | ||
16 | return(0); | ||
17 | } | ||
18 | |||
19 | static inline long cede_processor(void) | ||
20 | { | ||
21 | plpar_hcall_norets(H_CEDE); | ||
22 | return(0); | ||
23 | } | ||
24 | |||
25 | static inline long register_vpa(unsigned long flags, unsigned long proc, | ||
26 | unsigned long vpa) | ||
27 | { | ||
28 | return plpar_hcall_norets(H_REGISTER_VPA, flags, proc, vpa); | ||
29 | } | ||
30 | |||
31 | void vpa_init(int cpu); | ||
32 | |||
33 | static inline long plpar_pte_remove(unsigned long flags, | ||
34 | unsigned long ptex, | ||
35 | unsigned long avpn, | ||
36 | unsigned long *old_pteh_ret, | ||
37 | unsigned long *old_ptel_ret) | ||
38 | { | ||
39 | unsigned long dummy; | ||
40 | return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0, | ||
41 | old_pteh_ret, old_ptel_ret, &dummy); | ||
42 | } | ||
43 | |||
44 | static inline long plpar_pte_read(unsigned long flags, | ||
45 | unsigned long ptex, | ||
46 | unsigned long *old_pteh_ret, unsigned long *old_ptel_ret) | ||
47 | { | ||
48 | unsigned long dummy; | ||
49 | return plpar_hcall(H_READ, flags, ptex, 0, 0, | ||
50 | old_pteh_ret, old_ptel_ret, &dummy); | ||
51 | } | ||
52 | |||
53 | static inline long plpar_pte_protect(unsigned long flags, | ||
54 | unsigned long ptex, | ||
55 | unsigned long avpn) | ||
56 | { | ||
57 | return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn); | ||
58 | } | ||
59 | |||
60 | static inline long plpar_tce_get(unsigned long liobn, | ||
61 | unsigned long ioba, | ||
62 | unsigned long *tce_ret) | ||
63 | { | ||
64 | unsigned long dummy; | ||
65 | return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0, | ||
66 | tce_ret, &dummy, &dummy); | ||
67 | } | ||
68 | |||
69 | static inline long plpar_tce_put(unsigned long liobn, | ||
70 | unsigned long ioba, | ||
71 | unsigned long tceval) | ||
72 | { | ||
73 | return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval); | ||
74 | } | ||
75 | |||
76 | static inline long plpar_tce_put_indirect(unsigned long liobn, | ||
77 | unsigned long ioba, | ||
78 | unsigned long page, | ||
79 | unsigned long count) | ||
80 | { | ||
81 | return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count); | ||
82 | } | ||
83 | |||
84 | static inline long plpar_tce_stuff(unsigned long liobn, | ||
85 | unsigned long ioba, | ||
86 | unsigned long tceval, | ||
87 | unsigned long count) | ||
88 | { | ||
89 | return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count); | ||
90 | } | ||
91 | |||
92 | static inline long plpar_get_term_char(unsigned long termno, | ||
93 | unsigned long *len_ret, | ||
94 | char *buf_ret) | ||
95 | { | ||
96 | unsigned long *lbuf = (unsigned long *)buf_ret; /* ToDo: alignment? */ | ||
97 | return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0, | ||
98 | len_ret, lbuf+0, lbuf+1); | ||
99 | } | ||
100 | |||
101 | static inline long plpar_put_term_char(unsigned long termno, | ||
102 | unsigned long len, | ||
103 | const char *buffer) | ||
104 | { | ||
105 | unsigned long *lbuf = (unsigned long *)buffer; /* ToDo: alignment? */ | ||
106 | return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0], | ||
107 | lbuf[1]); | ||
108 | } | ||
109 | |||
110 | static inline long plpar_set_xdabr(unsigned long address, unsigned long flags) | ||
111 | { | ||
112 | return plpar_hcall_norets(H_SET_XDABR, address, flags); | ||
113 | } | ||
114 | |||
115 | static inline long plpar_set_dabr(unsigned long val) | ||
116 | { | ||
117 | return plpar_hcall_norets(H_SET_DABR, val); | ||
118 | } | ||
119 | |||
120 | #endif /* _PPC64_PLPAR_WRAPPERS_H */ | ||
diff --git a/include/asm-ppc64/ppc32.h b/include/asm-ppc64/ppc32.h deleted file mode 100644 index 3945a55d112a..000000000000 --- a/include/asm-ppc64/ppc32.h +++ /dev/null | |||
@@ -1,122 +0,0 @@ | |||
1 | #ifndef _PPC64_PPC32_H | ||
2 | #define _PPC64_PPC32_H | ||
3 | |||
4 | #include <linux/compat.h> | ||
5 | #include <asm/siginfo.h> | ||
6 | #include <asm/signal.h> | ||
7 | |||
8 | /* | ||
9 | * Data types and macros for providing 32b PowerPC support. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | /* These are here to support 32-bit syscalls on a 64-bit kernel. */ | ||
18 | |||
19 | typedef struct compat_siginfo { | ||
20 | int si_signo; | ||
21 | int si_errno; | ||
22 | int si_code; | ||
23 | |||
24 | union { | ||
25 | int _pad[SI_PAD_SIZE32]; | ||
26 | |||
27 | /* kill() */ | ||
28 | struct { | ||
29 | compat_pid_t _pid; /* sender's pid */ | ||
30 | compat_uid_t _uid; /* sender's uid */ | ||
31 | } _kill; | ||
32 | |||
33 | /* POSIX.1b timers */ | ||
34 | struct { | ||
35 | compat_timer_t _tid; /* timer id */ | ||
36 | int _overrun; /* overrun count */ | ||
37 | compat_sigval_t _sigval; /* same as below */ | ||
38 | int _sys_private; /* not to be passed to user */ | ||
39 | } _timer; | ||
40 | |||
41 | /* POSIX.1b signals */ | ||
42 | struct { | ||
43 | compat_pid_t _pid; /* sender's pid */ | ||
44 | compat_uid_t _uid; /* sender's uid */ | ||
45 | compat_sigval_t _sigval; | ||
46 | } _rt; | ||
47 | |||
48 | /* SIGCHLD */ | ||
49 | struct { | ||
50 | compat_pid_t _pid; /* which child */ | ||
51 | compat_uid_t _uid; /* sender's uid */ | ||
52 | int _status; /* exit code */ | ||
53 | compat_clock_t _utime; | ||
54 | compat_clock_t _stime; | ||
55 | } _sigchld; | ||
56 | |||
57 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */ | ||
58 | struct { | ||
59 | unsigned int _addr; /* faulting insn/memory ref. */ | ||
60 | } _sigfault; | ||
61 | |||
62 | /* SIGPOLL */ | ||
63 | struct { | ||
64 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
65 | int _fd; | ||
66 | } _sigpoll; | ||
67 | } _sifields; | ||
68 | } compat_siginfo_t; | ||
69 | |||
70 | #define __old_sigaction32 old_sigaction32 | ||
71 | |||
72 | struct __old_sigaction32 { | ||
73 | compat_uptr_t sa_handler; | ||
74 | compat_old_sigset_t sa_mask; | ||
75 | unsigned int sa_flags; | ||
76 | compat_uptr_t sa_restorer; /* not used by Linux/SPARC yet */ | ||
77 | }; | ||
78 | |||
79 | |||
80 | |||
81 | struct sigaction32 { | ||
82 | compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */ | ||
83 | unsigned int sa_flags; | ||
84 | compat_uptr_t sa_restorer; /* Another 32 bit pointer */ | ||
85 | compat_sigset_t sa_mask; /* A 32 bit mask */ | ||
86 | }; | ||
87 | |||
88 | typedef struct sigaltstack_32 { | ||
89 | unsigned int ss_sp; | ||
90 | int ss_flags; | ||
91 | compat_size_t ss_size; | ||
92 | } stack_32_t; | ||
93 | |||
94 | struct sigcontext32 { | ||
95 | unsigned int _unused[4]; | ||
96 | int signal; | ||
97 | compat_uptr_t handler; | ||
98 | unsigned int oldmask; | ||
99 | compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */ | ||
100 | }; | ||
101 | |||
102 | struct mcontext32 { | ||
103 | elf_gregset_t32 mc_gregs; | ||
104 | elf_fpregset_t mc_fregs; | ||
105 | unsigned int mc_pad[2]; | ||
106 | elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16))); | ||
107 | }; | ||
108 | |||
109 | struct ucontext32 { | ||
110 | unsigned int uc_flags; | ||
111 | unsigned int uc_link; | ||
112 | stack_32_t uc_stack; | ||
113 | int uc_pad[7]; | ||
114 | compat_uptr_t uc_regs; /* points to uc_mcontext field */ | ||
115 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | ||
116 | /* glibc has 1024-bit signal masks, ours are 64-bit */ | ||
117 | int uc_maskext[30]; | ||
118 | int uc_pad2[3]; | ||
119 | struct mcontext32 uc_mcontext; | ||
120 | }; | ||
121 | |||
122 | #endif /* _PPC64_PPC32_H */ | ||
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h index e8d0d2ab4c0f..bdb47174ff0e 100644 --- a/include/asm-ppc64/prom.h +++ b/include/asm-ppc64/prom.h | |||
@@ -188,6 +188,14 @@ extern struct device_node *of_get_next_child(const struct device_node *node, | |||
188 | extern struct device_node *of_node_get(struct device_node *node); | 188 | extern struct device_node *of_node_get(struct device_node *node); |
189 | extern void of_node_put(struct device_node *node); | 189 | extern void of_node_put(struct device_node *node); |
190 | 190 | ||
191 | /* For scanning the flat device-tree at boot time */ | ||
192 | int __init of_scan_flat_dt(int (*it)(unsigned long node, | ||
193 | const char *uname, int depth, | ||
194 | void *data), | ||
195 | void *data); | ||
196 | void* __init of_get_flat_dt_prop(unsigned long node, const char *name, | ||
197 | unsigned long *size); | ||
198 | |||
191 | /* For updating the device tree at runtime */ | 199 | /* For updating the device tree at runtime */ |
192 | extern void of_attach_node(struct device_node *); | 200 | extern void of_attach_node(struct device_node *); |
193 | extern void of_detach_node(const struct device_node *); | 201 | extern void of_detach_node(const struct device_node *); |
diff --git a/include/asm-ppc64/ptrace.h b/include/asm-ppc64/ptrace.h deleted file mode 100644 index 3a55377f1fd3..000000000000 --- a/include/asm-ppc64/ptrace.h +++ /dev/null | |||
@@ -1,213 +0,0 @@ | |||
1 | #ifndef _PPC64_PTRACE_H | ||
2 | #define _PPC64_PTRACE_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2001 PPC64 Team, IBM Corp | ||
6 | * | ||
7 | * This struct defines the way the registers are stored on the | ||
8 | * kernel stack during a system call or other kernel entry. | ||
9 | * | ||
10 | * this should only contain volatile regs | ||
11 | * since we can keep non-volatile in the thread_struct | ||
12 | * should set this up when only volatiles are saved | ||
13 | * by intr code. | ||
14 | * | ||
15 | * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure | ||
16 | * that the overall structure is a multiple of 16 bytes in length. | ||
17 | * | ||
18 | * Note that the offsets of the fields in this struct correspond with | ||
19 | * the PT_* values below. This simplifies arch/ppc64/kernel/ptrace.c. | ||
20 | * | ||
21 | * This program is free software; you can redistribute it and/or | ||
22 | * modify it under the terms of the GNU General Public License | ||
23 | * as published by the Free Software Foundation; either version | ||
24 | * 2 of the License, or (at your option) any later version. | ||
25 | */ | ||
26 | |||
27 | #ifndef __ASSEMBLY__ | ||
28 | |||
29 | struct pt_regs { | ||
30 | unsigned long gpr[32]; | ||
31 | unsigned long nip; | ||
32 | unsigned long msr; | ||
33 | unsigned long orig_gpr3; /* Used for restarting system calls */ | ||
34 | unsigned long ctr; | ||
35 | unsigned long link; | ||
36 | unsigned long xer; | ||
37 | unsigned long ccr; | ||
38 | unsigned long softe; /* Soft enabled/disabled */ | ||
39 | unsigned long trap; /* Reason for being here */ | ||
40 | unsigned long dar; /* Fault registers */ | ||
41 | unsigned long dsisr; | ||
42 | unsigned long result; /* Result of a system call */ | ||
43 | }; | ||
44 | |||
45 | struct pt_regs32 { | ||
46 | unsigned int gpr[32]; | ||
47 | unsigned int nip; | ||
48 | unsigned int msr; | ||
49 | unsigned int orig_gpr3; /* Used for restarting system calls */ | ||
50 | unsigned int ctr; | ||
51 | unsigned int link; | ||
52 | unsigned int xer; | ||
53 | unsigned int ccr; | ||
54 | unsigned int mq; /* 601 only (not used at present) */ | ||
55 | unsigned int trap; /* Reason for being here */ | ||
56 | unsigned int dar; /* Fault registers */ | ||
57 | unsigned int dsisr; | ||
58 | unsigned int result; /* Result of a system call */ | ||
59 | }; | ||
60 | |||
61 | #ifdef __KERNEL__ | ||
62 | |||
63 | #define instruction_pointer(regs) ((regs)->nip) | ||
64 | |||
65 | #ifdef CONFIG_SMP | ||
66 | extern unsigned long profile_pc(struct pt_regs *regs); | ||
67 | #else | ||
68 | #define profile_pc(regs) instruction_pointer(regs) | ||
69 | #endif | ||
70 | |||
71 | #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1) | ||
72 | |||
73 | #define force_successful_syscall_return() \ | ||
74 | (current_thread_info()->syscall_noerror = 1) | ||
75 | |||
76 | /* | ||
77 | * We use the least-significant bit of the trap field to indicate | ||
78 | * whether we have saved the full set of registers, or only a | ||
79 | * partial set. A 1 there means the partial set. | ||
80 | */ | ||
81 | #define FULL_REGS(regs) (((regs)->trap & 1) == 0) | ||
82 | #define TRAP(regs) ((regs)->trap & ~0xF) | ||
83 | #define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1) | ||
84 | |||
85 | #endif /* __KERNEL__ */ | ||
86 | |||
87 | #endif /* __ASSEMBLY__ */ | ||
88 | |||
89 | #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ | ||
90 | |||
91 | /* Size of dummy stack frame allocated when calling signal handler. */ | ||
92 | #define __SIGNAL_FRAMESIZE 128 | ||
93 | #define __SIGNAL_FRAMESIZE32 64 | ||
94 | |||
95 | /* | ||
96 | * Offsets used by 'ptrace' system call interface. | ||
97 | */ | ||
98 | #define PT_R0 0 | ||
99 | #define PT_R1 1 | ||
100 | #define PT_R2 2 | ||
101 | #define PT_R3 3 | ||
102 | #define PT_R4 4 | ||
103 | #define PT_R5 5 | ||
104 | #define PT_R6 6 | ||
105 | #define PT_R7 7 | ||
106 | #define PT_R8 8 | ||
107 | #define PT_R9 9 | ||
108 | #define PT_R10 10 | ||
109 | #define PT_R11 11 | ||
110 | #define PT_R12 12 | ||
111 | #define PT_R13 13 | ||
112 | #define PT_R14 14 | ||
113 | #define PT_R15 15 | ||
114 | #define PT_R16 16 | ||
115 | #define PT_R17 17 | ||
116 | #define PT_R18 18 | ||
117 | #define PT_R19 19 | ||
118 | #define PT_R20 20 | ||
119 | #define PT_R21 21 | ||
120 | #define PT_R22 22 | ||
121 | #define PT_R23 23 | ||
122 | #define PT_R24 24 | ||
123 | #define PT_R25 25 | ||
124 | #define PT_R26 26 | ||
125 | #define PT_R27 27 | ||
126 | #define PT_R28 28 | ||
127 | #define PT_R29 29 | ||
128 | #define PT_R30 30 | ||
129 | #define PT_R31 31 | ||
130 | |||
131 | #define PT_NIP 32 | ||
132 | #define PT_MSR 33 | ||
133 | #ifdef __KERNEL__ | ||
134 | #define PT_ORIG_R3 34 | ||
135 | #endif | ||
136 | #define PT_CTR 35 | ||
137 | #define PT_LNK 36 | ||
138 | #define PT_XER 37 | ||
139 | #define PT_CCR 38 | ||
140 | #define PT_SOFTE 39 | ||
141 | #define PT_TRAP 40 | ||
142 | #define PT_DAR 41 | ||
143 | #define PT_DSISR 42 | ||
144 | #define PT_RESULT 43 | ||
145 | |||
146 | #define PT_FPR0 48 | ||
147 | |||
148 | /* | ||
149 | * Kernel and userspace will both use this PT_FPSCR value. 32-bit apps will | ||
150 | * have visibility to the asm-ppc/ptrace.h header instead of this one. | ||
151 | */ | ||
152 | #define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */ | ||
153 | |||
154 | #ifdef __KERNEL__ | ||
155 | #define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */ | ||
156 | #endif | ||
157 | |||
158 | #define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */ | ||
159 | #define PT_VSCR (PT_VR0 + 32*2 + 1) | ||
160 | #define PT_VRSAVE (PT_VR0 + 33*2) | ||
161 | |||
162 | #ifdef __KERNEL__ | ||
163 | #define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */ | ||
164 | #define PT_VSCR_32 (PT_VR0 + 32*4 + 3) | ||
165 | #define PT_VRSAVE_32 (PT_VR0 + 33*4) | ||
166 | #endif | ||
167 | |||
168 | /* | ||
169 | * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. | ||
170 | * The transfer totals 34 quadwords. Quadwords 0-31 contain the | ||
171 | * corresponding vector registers. Quadword 32 contains the vscr as the | ||
172 | * last word (offset 12) within that quadword. Quadword 33 contains the | ||
173 | * vrsave as the first word (offset 0) within the quadword. | ||
174 | * | ||
175 | * This definition of the VMX state is compatible with the current PPC32 | ||
176 | * ptrace interface. This allows signal handling and ptrace to use the same | ||
177 | * structures. This also simplifies the implementation of a bi-arch | ||
178 | * (combined 32- and 64-bit) gdb. | ||
179 | */ | ||
180 | #define PTRACE_GETVRREGS 18 | ||
181 | #define PTRACE_SETVRREGS 19 | ||
182 | |||
183 | /* | ||
184 | * While we don't have 64-bit Book E processors, we need to reserve the | ||
185 | * relevant ptrace calls for 32bit compatibility. | ||
186 | */ | ||
187 | #if 0 | ||
188 | #define PTRACE_GETEVRREGS 20 | ||
189 | #define PTRACE_SETEVRREGS 21 | ||
190 | #endif | ||
191 | |||
192 | /* | ||
193 | * Get or set a debug register. The first 16 are DABR registers and the | ||
194 | * second 16 are IABR registers. | ||
195 | */ | ||
196 | #define PTRACE_GET_DEBUGREG 25 | ||
197 | #define PTRACE_SET_DEBUGREG 26 | ||
198 | |||
199 | /* Additional PTRACE requests implemented on PowerPC. */ | ||
200 | #define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */ | ||
201 | #define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */ | ||
202 | #define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */ | ||
203 | #define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */ | ||
204 | |||
205 | /* Calls to trace a 64-bit program from a 32-bit program */ | ||
206 | #define PPC_PTRACE_PEEKTEXT_3264 0x95 | ||
207 | #define PPC_PTRACE_PEEKDATA_3264 0x94 | ||
208 | #define PPC_PTRACE_POKETEXT_3264 0x93 | ||
209 | #define PPC_PTRACE_POKEDATA_3264 0x92 | ||
210 | #define PPC_PTRACE_PEEKUSR_3264 0x91 | ||
211 | #define PPC_PTRACE_POKEUSR_3264 0x90 | ||
212 | |||
213 | #endif /* _PPC64_PTRACE_H */ | ||
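The PT_* indices above are the slots ptrace exposes through the user area: GPRs occupy 0-31, the special registers follow at 32-43, FPRs start at slot 48, and the FPSCR sits at PT_FPR0 + 32 in the 64-bit view. Below is a minimal userspace sketch of how those indices are typically consumed; it assumes the usual PowerPC convention that PTRACE_PEEKUSER takes the slot index scaled by sizeof(long), and dump_child_regs() plus the traced child are illustrative, not part of this header.

/* Hypothetical sketch: read a stopped child's NIP and FPSCR with
 * PTRACE_PEEKUSER, assuming the PT_* index is scaled by sizeof(long)
 * to form the user-area "address" (the usual PowerPC convention).
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#define PT_NIP   32
#define PT_FPR0  48
#define PT_FPSCR (PT_FPR0 + 32)		/* 64-bit view: one slot per FPR */

static void dump_child_regs(pid_t child)
{
	long nip   = ptrace(PTRACE_PEEKUSER, child,
			    (void *)(PT_NIP * sizeof(long)), NULL);
	long fpscr = ptrace(PTRACE_PEEKUSER, child,
			    (void *)(PT_FPSCR * sizeof(long)), NULL);

	printf("nip=0x%lx fpscr=0x%lx\n", nip, fpscr);
}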
diff --git a/include/asm-ppc64/sigcontext.h b/include/asm-ppc64/sigcontext.h deleted file mode 100644 index 6f8aee768c5e..000000000000 --- a/include/asm-ppc64/sigcontext.h +++ /dev/null | |||
@@ -1,47 +0,0 @@ | |||
1 | #ifndef _ASM_PPC64_SIGCONTEXT_H | ||
2 | #define _ASM_PPC64_SIGCONTEXT_H | ||
3 | |||
4 | /* | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | #include <linux/compiler.h> | ||
11 | #include <asm/ptrace.h> | ||
12 | #include <asm/elf.h> | ||
13 | |||
14 | |||
15 | struct sigcontext { | ||
16 | unsigned long _unused[4]; | ||
17 | int signal; | ||
18 | int _pad0; | ||
19 | unsigned long handler; | ||
20 | unsigned long oldmask; | ||
21 | struct pt_regs __user *regs; | ||
22 | elf_gregset_t gp_regs; | ||
23 | elf_fpregset_t fp_regs; | ||
24 | /* | ||
25 | * To maintain compatibility with current implementations the sigcontext is | ||
26 | * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t) | ||
27 | * followed by an unstructured (vmx_reserve) field of 69 doublewords. This | ||
28 | * allows the array of vector registers to be quadword aligned independent of | ||
29 | * the alignment of the containing sigcontext or ucontext. It is the | ||
30 | * responsibility of the code setting the sigcontext to set this pointer to | ||
31 | * either NULL (if this processor does not support the VMX feature) or the | ||
32 | * address of the first quadword within the allocated (vmx_reserve) area. | ||
33 | * | ||
34 | * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with | ||
35 | * an array of 34 quadword entries (elf_vrregset_t). The entries with | ||
36 | * indexes 0-31 contain the corresponding vector registers. The entry with | ||
37 | * index 32 contains the vscr as the last word (offset 12) within the | ||
38 | * quadword. This allows the vscr to be stored as either a quadword (since | ||
39 | * it must be copied via a vector register to/from storage) or as a word. | ||
40 | * The entry with index 33 contains the vrsave as the first word (offset 0) | ||
41 | * within the quadword. | ||
42 | */ | ||
43 | elf_vrreg_t __user *v_regs; | ||
44 | long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1]; | ||
45 | }; | ||
46 | |||
47 | #endif /* _ASM_PPC64_SIGCONTEXT_H */ | ||
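The comment above pins down the v_regs layout: 34 quadwords, with the VSCR in the last word (offset 12) of quadword 32 and VRSAVE in the first word of quadword 33. Here is a hedged userspace sketch of walking that layout from a SA_SIGINFO signal handler; it assumes the ppc64 convention that the ucontext passed to the handler embeds this sigcontext as uc_mcontext, and vmx_dump() is illustrative only.

/* Hypothetical sketch: locate the VMX save area through v_regs inside a
 * SA_SIGINFO handler.  Assumes uc_mcontext is this struct sigcontext and
 * that each elf_vrreg_t quadword is four 32-bit words; v_regs is NULL on
 * CPUs without VMX, so check it first.  (printf is used only for
 * illustration; it is not async-signal-safe.)
 */
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

static void vmx_dump(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;
	struct sigcontext *sc = (struct sigcontext *)&uc->uc_mcontext;
	unsigned int *q;

	if (!sc->v_regs)
		return;			/* no VMX on this processor */

	q = (unsigned int *)sc->v_regs;
	/* quadword 32, word 3 (offset 12): vscr; quadword 33, word 0: vrsave */
	printf("vscr=0x%x vrsave=0x%x\n", q[32 * 4 + 3], q[33 * 4 + 0]);
}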
diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h deleted file mode 100644 index c5e9052e7967..000000000000 --- a/include/asm-ppc64/smp.h +++ /dev/null | |||
@@ -1,98 +0,0 @@ | |||
1 | /* | ||
2 | * smp.h: PPC64 specific SMP code. | ||
3 | * | ||
4 | * Original was a copy of sparc smp.h. Now heavily modified | ||
5 | * for PPC. | ||
6 | * | ||
7 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | ||
8 | * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifdef __KERNEL__ | ||
17 | #ifndef _PPC64_SMP_H | ||
18 | #define _PPC64_SMP_H | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/threads.h> | ||
22 | #include <linux/cpumask.h> | ||
23 | #include <linux/kernel.h> | ||
24 | |||
25 | #ifndef __ASSEMBLY__ | ||
26 | |||
27 | #include <asm/paca.h> | ||
28 | |||
29 | extern int boot_cpuid; | ||
30 | extern int boot_cpuid_phys; | ||
31 | |||
32 | extern void cpu_die(void); | ||
33 | |||
34 | #ifdef CONFIG_SMP | ||
35 | |||
36 | extern void smp_send_debugger_break(int cpu); | ||
37 | struct pt_regs; | ||
38 | extern void smp_message_recv(int, struct pt_regs *); | ||
39 | |||
40 | #ifdef CONFIG_HOTPLUG_CPU | ||
41 | extern void fixup_irqs(cpumask_t map); | ||
42 | int generic_cpu_disable(void); | ||
43 | int generic_cpu_enable(unsigned int cpu); | ||
44 | void generic_cpu_die(unsigned int cpu); | ||
45 | void generic_mach_cpu_die(void); | ||
46 | #endif | ||
47 | |||
48 | #define raw_smp_processor_id() (get_paca()->paca_index) | ||
49 | #define hard_smp_processor_id() (get_paca()->hw_cpu_id) | ||
50 | |||
51 | extern cpumask_t cpu_sibling_map[NR_CPUS]; | ||
52 | |||
53 | /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. | ||
54 | * | ||
55 | * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up | ||
56 | * in /proc/interrupts will be wrong!!! --Troy */ | ||
57 | #define PPC_MSG_CALL_FUNCTION 0 | ||
58 | #define PPC_MSG_RESCHEDULE 1 | ||
59 | /* This is unused now */ | ||
60 | #if 0 | ||
61 | #define PPC_MSG_MIGRATE_TASK 2 | ||
62 | #endif | ||
63 | #define PPC_MSG_DEBUGGER_BREAK 3 | ||
64 | |||
65 | void smp_init_iSeries(void); | ||
66 | void smp_init_pSeries(void); | ||
67 | |||
68 | extern int __cpu_disable(void); | ||
69 | extern void __cpu_die(unsigned int cpu); | ||
70 | #endif /* CONFIG_SMP */ | ||
71 | |||
72 | #define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id) | ||
73 | #define set_hard_smp_processor_id(CPU, VAL) \ | ||
74 | do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0) | ||
75 | |||
76 | extern int smt_enabled_at_boot; | ||
77 | |||
78 | extern int smp_mpic_probe(void); | ||
79 | extern void smp_mpic_setup_cpu(int cpu); | ||
80 | extern void smp_generic_kick_cpu(int nr); | ||
81 | |||
82 | extern void smp_generic_give_timebase(void); | ||
83 | extern void smp_generic_take_timebase(void); | ||
84 | |||
85 | extern struct smp_ops_t *smp_ops; | ||
86 | |||
87 | #ifdef CONFIG_PPC_PSERIES | ||
88 | void vpa_init(int cpu); | ||
89 | #else | ||
90 | static inline void vpa_init(int cpu) | ||
91 | { | ||
92 | } | ||
93 | #endif /* CONFIG_PPC_PSERIES */ | ||
94 | |||
95 | #endif /* __ASSEMBLY__ */ | ||
96 | |||
97 | #endif /* !(_PPC64_SMP_H) */ | ||
98 | #endif /* __KERNEL__ */ | ||
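get_hard_smp_processor_id() and set_hard_smp_processor_id() above are thin wrappers around the per-CPU paca array, mapping a logical CPU number to the hardware id the interrupt controller uses. A small kernel-side sketch of that mapping follows; print_cpu_map() is illustrative, not an existing function.

/* Illustrative kernel-side sketch: print each online logical CPU next to
 * the hardware id recorded in its paca entry, using the accessors above.
 */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <asm/smp.h>

static void print_cpu_map(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		printk(KERN_DEBUG "logical cpu %d -> hw id %d\n",
		       cpu, get_hard_smp_processor_id(cpu));
}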
diff --git a/include/asm-ppc64/sparsemem.h b/include/asm-ppc64/sparsemem.h deleted file mode 100644 index c5bd47e57f17..000000000000 --- a/include/asm-ppc64/sparsemem.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | #ifndef _ASM_PPC64_SPARSEMEM_H | ||
2 | #define _ASM_PPC64_SPARSEMEM_H 1 | ||
3 | |||
4 | #ifdef CONFIG_SPARSEMEM | ||
5 | /* | ||
6 | * SECTION_SIZE_BITS 2^N: how big each section will be | ||
7 | * MAX_PHYSADDR_BITS 2^N: how much physical address space we have | ||
8 | * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space | ||
9 | */ | ||
10 | #define SECTION_SIZE_BITS 24 | ||
11 | #define MAX_PHYSADDR_BITS 38 | ||
12 | #define MAX_PHYSMEM_BITS 36 | ||
13 | |||
14 | #endif /* CONFIG_SPARSEMEM */ | ||
15 | |||
16 | #endif /* _ASM_PPC64_SPARSEMEM_H */ | ||
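The three constants above fix the sparsemem geometry: 2^24-byte (16 MB) sections over a 2^38-byte physical address space, of which at most 2^36 bytes (64 GB) can be populated, so at most 2^(36-24) = 4096 sections are ever needed. A standalone sketch that just checks that arithmetic:

/* Quick arithmetic check of the constants above: a 2^24-byte section is
 * 16 MB, 2^36 bytes of memory is 64 GB, so at most 2^(36-24) = 4096
 * sections are needed to describe it.
 */
#include <stdio.h>

#define SECTION_SIZE_BITS 24
#define MAX_PHYSMEM_BITS  36

int main(void)
{
	unsigned long section_bytes = 1UL << SECTION_SIZE_BITS;
	unsigned long max_sections  = 1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS);

	printf("section = %lu MB, max sections = %lu\n",
	       section_bytes >> 20, max_sections);
	return 0;
}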
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h index 14cb895bb607..7d84fb5e39f1 100644 --- a/include/asm-ppc64/spinlock.h +++ b/include/asm-ppc64/spinlock.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/config.h> | 21 | #include <linux/config.h> |
22 | #include <asm/paca.h> | 22 | #include <asm/paca.h> |
23 | #include <asm/hvcall.h> | 23 | #include <asm/hvcall.h> |
24 | #include <asm/iSeries/HvCall.h> | 24 | #include <asm/iseries/hv_call.h> |
25 | 25 | ||
26 | #define __raw_spin_is_locked(x) ((x)->slock != 0) | 26 | #define __raw_spin_is_locked(x) ((x)->slock != 0) |
27 | 27 | ||
diff --git a/include/asm-ppc64/stat.h b/include/asm-ppc64/stat.h deleted file mode 100644 index 973a5f97951d..000000000000 --- a/include/asm-ppc64/stat.h +++ /dev/null | |||
@@ -1,60 +0,0 @@ | |||
1 | #ifndef _PPC64_STAT_H | ||
2 | #define _PPC64_STAT_H | ||
3 | |||
4 | /* | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | |||
13 | struct stat { | ||
14 | unsigned long st_dev; | ||
15 | ino_t st_ino; | ||
16 | nlink_t st_nlink; | ||
17 | mode_t st_mode; | ||
18 | uid_t st_uid; | ||
19 | gid_t st_gid; | ||
20 | unsigned long st_rdev; | ||
21 | off_t st_size; | ||
22 | unsigned long st_blksize; | ||
23 | unsigned long st_blocks; | ||
24 | unsigned long st_atime; | ||
25 | unsigned long st_atime_nsec; | ||
26 | unsigned long st_mtime; | ||
27 | unsigned long st_mtime_nsec; | ||
28 | unsigned long st_ctime; | ||
29 | unsigned long st_ctime_nsec; | ||
30 | unsigned long __unused4; | ||
31 | unsigned long __unused5; | ||
32 | unsigned long __unused6; | ||
33 | }; | ||
34 | |||
35 | #define STAT_HAVE_NSEC 1 | ||
36 | |||
37 | /* This matches struct stat64 in glibc 2.1. Only used for 32-bit. */ | ||
38 | struct stat64 { | ||
39 | unsigned long st_dev; /* Device. */ | ||
40 | unsigned long st_ino; /* File serial number. */ | ||
41 | unsigned int st_mode; /* File mode. */ | ||
42 | unsigned int st_nlink; /* Link count. */ | ||
43 | unsigned int st_uid; /* User ID of the file's owner. */ | ||
44 | unsigned int st_gid; /* Group ID of the file's group. */ | ||
45 | unsigned long st_rdev; /* Device number, if device. */ | ||
46 | unsigned short __pad2; | ||
47 | long st_size; /* Size of file, in bytes. */ | ||
48 | int st_blksize; /* Optimal block size for I/O. */ | ||
49 | |||
50 | long st_blocks; /* Number of 512-byte blocks allocated. */ | ||
51 | int st_atime; /* Time of last access. */ | ||
52 | int st_atime_nsec; | ||
53 | int st_mtime; /* Time of last modification. */ | ||
54 | int st_mtime_nsec; | ||
55 | int st_ctime; /* Time of last status change. */ | ||
56 | int st_ctime_nsec; | ||
57 | unsigned int __unused4; | ||
58 | unsigned int __unused5; | ||
59 | }; | ||
60 | #endif | ||
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h index 99b8ca52f101..0cdd66c9f4b7 100644 --- a/include/asm-ppc64/system.h +++ b/include/asm-ppc64/system.h | |||
@@ -248,7 +248,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) | |||
248 | } | 248 | } |
249 | 249 | ||
250 | static __inline__ unsigned long | 250 | static __inline__ unsigned long |
251 | __cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new) | 251 | __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) |
252 | { | 252 | { |
253 | unsigned long prev; | 253 | unsigned long prev; |
254 | 254 | ||
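The hunk above only tightens the pointer type of __cmpxchg_u64() from volatile long * to volatile unsigned long *, matching the unsigned values it actually manipulates. For context, here is a minimal sketch of the retry loop such a primitive normally sits under, assuming the generic cmpxchg(ptr, old, new) wrapper from this header, which returns the value previously stored at ptr; add_to_counter() is illustrative.

/* Minimal sketch of a cmpxchg() retry loop, assuming the generic
 * cmpxchg(ptr, old, new) macro from this header, which returns the
 * value that was previously in *ptr.  add_to_counter() is illustrative.
 */
static void add_to_counter(unsigned long *counter, unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *counter;
		new = old + delta;
	} while (cmpxchg(counter, old, new) != old);
}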
diff --git a/include/asm-ppc64/tlb.h b/include/asm-ppc64/tlb.h deleted file mode 100644 index 97cb696ce68d..000000000000 --- a/include/asm-ppc64/tlb.h +++ /dev/null | |||
@@ -1,39 +0,0 @@ | |||
1 | /* | ||
2 | * TLB shootdown specifics for PPC64 | ||
3 | * | ||
4 | * Copyright (C) 2002 Anton Blanchard, IBM Corp. | ||
5 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | #ifndef _PPC64_TLB_H | ||
13 | #define _PPC64_TLB_H | ||
14 | |||
15 | #include <asm/tlbflush.h> | ||
16 | |||
17 | struct mmu_gather; | ||
18 | |||
19 | extern void pte_free_finish(void); | ||
20 | |||
21 | static inline void tlb_flush(struct mmu_gather *tlb) | ||
22 | { | ||
23 | flush_tlb_pending(); | ||
24 | pte_free_finish(); | ||
25 | } | ||
26 | |||
27 | /* Avoid pulling in another include just for this */ | ||
28 | #define check_pgt_cache() do { } while (0) | ||
29 | |||
30 | /* Get the generic bits... */ | ||
31 | #include <asm-generic/tlb.h> | ||
32 | |||
33 | /* Nothing needed here in fact... */ | ||
34 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
35 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
36 | |||
37 | #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) | ||
38 | |||
39 | #endif /* _PPC64_TLB_H */ | ||
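tlb_flush() above is the arch hook the generic mmu_gather machinery invokes once a batch of unmapped pages is drained: it pushes any pending hash-PTE invalidations and then finishes the deferred page-table frees. A hedged sketch of the calling pattern, assuming the 2.6-era asm-generic/tlb.h interface (tlb_gather_mmu / tlb_remove_page / tlb_finish_mmu); free_range() itself is illustrative, not a real kernel function.

/* Hedged sketch of how the generic mmu_gather path ends up in the
 * tlb_flush() above, assuming the 2.6-era asm-generic/tlb.h interface.
 */
static void free_range(struct mm_struct *mm, struct page **pages, int nr,
		       unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
	int i;

	for (i = 0; i < nr; i++)
		tlb_remove_page(tlb, pages[i]);	/* batches the page frees */

	/* drains the batch; on ppc64 this calls tlb_flush() ->
	 * flush_tlb_pending() and then pte_free_finish() */
	tlb_finish_mmu(tlb, start, end);
}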
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h deleted file mode 100644 index 626f505c6ee3..000000000000 --- a/include/asm-ppc64/tlbflush.h +++ /dev/null | |||
@@ -1,52 +0,0 @@ | |||
1 | #ifndef _PPC64_TLBFLUSH_H | ||
2 | #define _PPC64_TLBFLUSH_H | ||
3 | |||
4 | /* | ||
5 | * TLB flushing: | ||
6 | * | ||
7 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
8 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
9 | * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB | ||
10 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
11 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
12 | * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables | ||
13 | */ | ||
14 | |||
15 | #include <linux/percpu.h> | ||
16 | #include <asm/page.h> | ||
17 | |||
18 | #define PPC64_TLB_BATCH_NR 192 | ||
19 | |||
20 | struct mm_struct; | ||
21 | struct ppc64_tlb_batch { | ||
22 | unsigned long index; | ||
23 | struct mm_struct *mm; | ||
24 | pte_t pte[PPC64_TLB_BATCH_NR]; | ||
25 | unsigned long vaddr[PPC64_TLB_BATCH_NR]; | ||
26 | unsigned int large; | ||
27 | }; | ||
28 | DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); | ||
29 | |||
30 | extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); | ||
31 | |||
32 | static inline void flush_tlb_pending(void) | ||
33 | { | ||
34 | struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); | ||
35 | |||
36 | if (batch->index) | ||
37 | __flush_tlb_pending(batch); | ||
38 | put_cpu_var(ppc64_tlb_batch); | ||
39 | } | ||
40 | |||
41 | #define flush_tlb_mm(mm) flush_tlb_pending() | ||
42 | #define flush_tlb_page(vma, addr) flush_tlb_pending() | ||
43 | #define flush_tlb_page_nohash(vma, addr) do { } while (0) | ||
44 | #define flush_tlb_range(vma, start, end) \ | ||
45 | do { (void)(start); flush_tlb_pending(); } while (0) | ||
46 | #define flush_tlb_kernel_range(start, end) flush_tlb_pending() | ||
47 | #define flush_tlb_pgtables(mm, start, end) do { } while (0) | ||
48 | |||
49 | extern void flush_hash_page(unsigned long va, pte_t pte, int local); | ||
50 | void flush_hash_range(unsigned long number, int local); | ||
51 | |||
52 | #endif /* _PPC64_TLBFLUSH_H */ | ||
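struct ppc64_tlb_batch above accumulates up to PPC64_TLB_BATCH_NR (pte, vaddr) pairs per CPU so hash-table invalidations can be issued in bulk by __flush_tlb_pending(). Below is a purely illustrative sketch of the producer side, using only the fields declared here and assuming __flush_tlb_pending() resets batch->index after draining; the real producer lives in the ppc64 hash-MMU code.

/* Illustrative sketch (not the real hpte update path): queue one address
 * translation into the per-CPU batch declared above and drain it when it
 * fills up.  Assumes __flush_tlb_pending() resets batch->index.
 */
static void queue_tlb_flush(struct mm_struct *mm, unsigned long vaddr, pte_t pte)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long i = batch->index;

	batch->mm = mm;
	batch->pte[i] = pte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;

	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);	/* drain a full batch */

	put_cpu_var(ppc64_tlb_batch);
}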
diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h deleted file mode 100644 index 132c1276547b..000000000000 --- a/include/asm-ppc64/uaccess.h +++ /dev/null | |||
@@ -1,341 +0,0 @@ | |||
1 | #ifndef _PPC64_UACCESS_H | ||
2 | #define _PPC64_UACCESS_H | ||
3 | |||
4 | /* | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASSEMBLY__ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <asm/processor.h> | ||
15 | |||
16 | #define VERIFY_READ 0 | ||
17 | #define VERIFY_WRITE 1 | ||
18 | |||
19 | /* | ||
20 | * The fs value determines whether argument validity checking should be | ||
21 | * performed or not. If get_fs() == USER_DS, checking is performed, with | ||
22 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
23 | * | ||
24 | * For historical reasons, these macros are grossly misnamed. | ||
25 | */ | ||
26 | |||
27 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | ||
28 | |||
29 | #define KERNEL_DS MAKE_MM_SEG(0UL) | ||
30 | #define USER_DS MAKE_MM_SEG(0xf000000000000000UL) | ||
31 | |||
32 | #define get_ds() (KERNEL_DS) | ||
33 | #define get_fs() (current->thread.fs) | ||
34 | #define set_fs(val) (current->thread.fs = (val)) | ||
35 | |||
36 | #define segment_eq(a,b) ((a).seg == (b).seg) | ||
37 | |||
38 | /* | ||
39 | * Use the Alpha trick for checking ranges: | ||
40 | * | ||
41 | * Is an address valid? This does a straightforward calculation rather | ||
42 | * than tests. | ||
43 | * | ||
44 | * Address valid if: | ||
45 | * - "addr" doesn't have any high-bits set | ||
46 | * - AND "size" doesn't have any high-bits set | ||
47 | * - OR we are in kernel mode. | ||
48 | * | ||
49 | * We don't have to check for high bits in (addr+size) because the first | ||
50 | * two checks force the maximum result to be below the start of the | ||
51 | * kernel region. | ||
52 | */ | ||
53 | #define __access_ok(addr,size,segment) \ | ||
54 | (((segment).seg & (addr | size )) == 0) | ||
55 | |||
56 | #define access_ok(type,addr,size) \ | ||
57 | __access_ok(((__force unsigned long)(addr)),(size),get_fs()) | ||
58 | |||
59 | /* | ||
60 | * The exception table consists of pairs of addresses: the first is the | ||
61 | * address of an instruction that is allowed to fault, and the second is | ||
62 | * the address at which the program should continue. No registers are | ||
63 | * modified, so it is entirely up to the continuation code to figure out | ||
64 | * what to do. | ||
65 | * | ||
66 | * All the routines below use bits of fixup code that are out of line | ||
67 | * with the main instruction path. This means when everything is well, | ||
68 | * we don't even have to jump over them. Further, they do not intrude | ||
69 | * on our cache or tlb entries. | ||
70 | */ | ||
71 | |||
72 | struct exception_table_entry | ||
73 | { | ||
74 | unsigned long insn, fixup; | ||
75 | }; | ||
76 | |||
77 | /* Returns 0 if exception not found and fixup otherwise. */ | ||
78 | extern unsigned long search_exception_table(unsigned long); | ||
79 | |||
80 | /* | ||
81 | * These are the main single-value transfer routines. They automatically | ||
82 | * use the right size if we just have the right pointer type. | ||
83 | * | ||
84 | * This gets kind of ugly. We want to return _two_ values in "get_user()" | ||
85 | * and yet we don't want to do any pointers, because that is too much | ||
86 | * of a performance impact. Thus we have a few rather ugly macros here, | ||
87 | * and hide all the ugliness from the user. | ||
88 | * | ||
89 | * The "__xxx" versions of the user access functions are versions that | ||
90 | * do not verify the address space, that must have been done previously | ||
91 | * with a separate "access_ok()" call (this is used when we do multiple | ||
92 | * accesses to the same area of user memory). | ||
93 | * | ||
94 | * As we use the same address space for kernel and user data on the | ||
95 | * PowerPC, we can just do these as direct assignments. (Of course, the | ||
96 | * exception handling means that it's no longer "just"...) | ||
97 | */ | ||
98 | #define get_user(x,ptr) \ | ||
99 | __get_user_check((x),(ptr),sizeof(*(ptr))) | ||
100 | #define put_user(x,ptr) \ | ||
101 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
102 | |||
103 | #define __get_user(x,ptr) \ | ||
104 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | ||
105 | #define __put_user(x,ptr) \ | ||
106 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
107 | |||
108 | #define __get_user_unaligned __get_user | ||
109 | #define __put_user_unaligned __put_user | ||
110 | |||
111 | extern long __put_user_bad(void); | ||
112 | |||
113 | #define __put_user_nocheck(x,ptr,size) \ | ||
114 | ({ \ | ||
115 | long __pu_err; \ | ||
116 | might_sleep(); \ | ||
117 | __chk_user_ptr(ptr); \ | ||
118 | __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ | ||
119 | __pu_err; \ | ||
120 | }) | ||
121 | |||
122 | #define __put_user_check(x,ptr,size) \ | ||
123 | ({ \ | ||
124 | long __pu_err = -EFAULT; \ | ||
125 | void __user *__pu_addr = (ptr); \ | ||
126 | might_sleep(); \ | ||
127 | if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ | ||
128 | __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \ | ||
129 | __pu_err; \ | ||
130 | }) | ||
131 | |||
132 | #define __put_user_size(x,ptr,size,retval,errret) \ | ||
133 | do { \ | ||
134 | retval = 0; \ | ||
135 | switch (size) { \ | ||
136 | case 1: __put_user_asm(x,ptr,retval,"stb",errret); break; \ | ||
137 | case 2: __put_user_asm(x,ptr,retval,"sth",errret); break; \ | ||
138 | case 4: __put_user_asm(x,ptr,retval,"stw",errret); break; \ | ||
139 | case 8: __put_user_asm(x,ptr,retval,"std",errret); break; \ | ||
140 | default: __put_user_bad(); \ | ||
141 | } \ | ||
142 | } while (0) | ||
143 | |||
144 | /* | ||
145 | * We don't tell gcc that we are accessing memory, but this is OK | ||
146 | * because we do not write to any memory gcc knows about, so there | ||
147 | * are no aliasing issues. | ||
148 | */ | ||
149 | #define __put_user_asm(x, addr, err, op, errret) \ | ||
150 | __asm__ __volatile__( \ | ||
151 | "1: "op" %1,0(%2) # put_user\n" \ | ||
152 | "2:\n" \ | ||
153 | ".section .fixup,\"ax\"\n" \ | ||
154 | "3: li %0,%3\n" \ | ||
155 | " b 2b\n" \ | ||
156 | ".previous\n" \ | ||
157 | ".section __ex_table,\"a\"\n" \ | ||
158 | " .align 3\n" \ | ||
159 | " .llong 1b,3b\n" \ | ||
160 | ".previous" \ | ||
161 | : "=r"(err) \ | ||
162 | : "r"(x), "b"(addr), "i"(errret), "0"(err)) | ||
163 | |||
164 | |||
165 | #define __get_user_nocheck(x,ptr,size) \ | ||
166 | ({ \ | ||
167 | long __gu_err; \ | ||
168 | unsigned long __gu_val; \ | ||
169 | might_sleep(); \ | ||
170 | __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ | ||
171 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
172 | __gu_err; \ | ||
173 | }) | ||
174 | |||
175 | #define __get_user_check(x,ptr,size) \ | ||
176 | ({ \ | ||
177 | long __gu_err = -EFAULT; \ | ||
178 | unsigned long __gu_val = 0; \ | ||
179 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
180 | might_sleep(); \ | ||
181 | if (access_ok(VERIFY_READ,__gu_addr,size)) \ | ||
182 | __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\ | ||
183 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
184 | __gu_err; \ | ||
185 | }) | ||
186 | |||
187 | extern long __get_user_bad(void); | ||
188 | |||
189 | #define __get_user_size(x,ptr,size,retval,errret) \ | ||
190 | do { \ | ||
191 | retval = 0; \ | ||
192 | __chk_user_ptr(ptr); \ | ||
193 | switch (size) { \ | ||
194 | case 1: __get_user_asm(x,ptr,retval,"lbz",errret); break; \ | ||
195 | case 2: __get_user_asm(x,ptr,retval,"lhz",errret); break; \ | ||
196 | case 4: __get_user_asm(x,ptr,retval,"lwz",errret); break; \ | ||
197 | case 8: __get_user_asm(x,ptr,retval,"ld",errret); break; \ | ||
198 | default: (x) = __get_user_bad(); \ | ||
199 | } \ | ||
200 | } while (0) | ||
201 | |||
202 | #define __get_user_asm(x, addr, err, op, errret) \ | ||
203 | __asm__ __volatile__( \ | ||
204 | "1: "op" %1,0(%2) # get_user\n" \ | ||
205 | "2:\n" \ | ||
206 | ".section .fixup,\"ax\"\n" \ | ||
207 | "3: li %0,%3\n" \ | ||
208 | " li %1,0\n" \ | ||
209 | " b 2b\n" \ | ||
210 | ".previous\n" \ | ||
211 | ".section __ex_table,\"a\"\n" \ | ||
212 | " .align 3\n" \ | ||
213 | " .llong 1b,3b\n" \ | ||
214 | ".previous" \ | ||
215 | : "=r"(err), "=r"(x) \ | ||
216 | : "b"(addr), "i"(errret), "0"(err)) | ||
217 | |||
218 | /* more complex routines */ | ||
219 | |||
220 | extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from, | ||
221 | unsigned long size); | ||
222 | |||
223 | static inline unsigned long | ||
224 | __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) | ||
225 | { | ||
226 | if (__builtin_constant_p(n)) { | ||
227 | unsigned long ret; | ||
228 | |||
229 | switch (n) { | ||
230 | case 1: | ||
231 | __get_user_size(*(u8 *)to, from, 1, ret, 1); | ||
232 | return ret; | ||
233 | case 2: | ||
234 | __get_user_size(*(u16 *)to, from, 2, ret, 2); | ||
235 | return ret; | ||
236 | case 4: | ||
237 | __get_user_size(*(u32 *)to, from, 4, ret, 4); | ||
238 | return ret; | ||
239 | case 8: | ||
240 | __get_user_size(*(u64 *)to, from, 8, ret, 8); | ||
241 | return ret; | ||
242 | } | ||
243 | } | ||
244 | return __copy_tofrom_user((__force void __user *) to, from, n); | ||
245 | } | ||
246 | |||
247 | static inline unsigned long | ||
248 | __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
249 | { | ||
250 | might_sleep(); | ||
251 | return __copy_from_user_inatomic(to, from, n); | ||
252 | } | ||
253 | |||
254 | static inline unsigned long | ||
255 | __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) | ||
256 | { | ||
257 | if (__builtin_constant_p(n)) { | ||
258 | unsigned long ret; | ||
259 | |||
260 | switch (n) { | ||
261 | case 1: | ||
262 | __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); | ||
263 | return ret; | ||
264 | case 2: | ||
265 | __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); | ||
266 | return ret; | ||
267 | case 4: | ||
268 | __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); | ||
269 | return ret; | ||
270 | case 8: | ||
271 | __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret, 8); | ||
272 | return ret; | ||
273 | } | ||
274 | } | ||
275 | return __copy_tofrom_user(to, (__force const void __user *) from, n); | ||
276 | } | ||
277 | |||
278 | static inline unsigned long | ||
279 | __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
280 | { | ||
281 | might_sleep(); | ||
282 | return __copy_to_user_inatomic(to, from, n); | ||
283 | } | ||
284 | |||
285 | #define __copy_in_user(to, from, size) \ | ||
286 | __copy_tofrom_user((to), (from), (size)) | ||
287 | |||
288 | extern unsigned long copy_from_user(void *to, const void __user *from, | ||
289 | unsigned long n); | ||
290 | extern unsigned long copy_to_user(void __user *to, const void *from, | ||
291 | unsigned long n); | ||
292 | extern unsigned long copy_in_user(void __user *to, const void __user *from, | ||
293 | unsigned long n); | ||
294 | |||
295 | extern unsigned long __clear_user(void __user *addr, unsigned long size); | ||
296 | |||
297 | static inline unsigned long | ||
298 | clear_user(void __user *addr, unsigned long size) | ||
299 | { | ||
300 | might_sleep(); | ||
301 | if (likely(access_ok(VERIFY_WRITE, addr, size))) | ||
302 | size = __clear_user(addr, size); | ||
303 | return size; | ||
304 | } | ||
305 | |||
306 | extern int __strncpy_from_user(char *dst, const char __user *src, long count); | ||
307 | |||
308 | static inline long | ||
309 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
310 | { | ||
311 | might_sleep(); | ||
312 | if (likely(access_ok(VERIFY_READ, src, 1))) | ||
313 | return __strncpy_from_user(dst, src, count); | ||
314 | return -EFAULT; | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * Return the size of a string (including the ending 0) | ||
319 | * | ||
320 | * Return 0 for error | ||
321 | */ | ||
322 | extern int __strnlen_user(const char __user *str, long len); | ||
323 | |||
324 | /* | ||
325 | * Returns the length of the string at str (including the null byte), | ||
326 | * or 0 if we hit a page we can't access, | ||
327 | * or something > len if we didn't find a null byte. | ||
328 | */ | ||
329 | static inline int strnlen_user(const char __user *str, long len) | ||
330 | { | ||
331 | might_sleep(); | ||
332 | if (likely(access_ok(VERIFY_READ, str, 1))) | ||
333 | return __strnlen_user(str, len); | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | #define strlen_user(str) strnlen_user((str), 0x7ffffffe) | ||
338 | |||
339 | #endif /* __ASSEMBLY__ */ | ||
340 | |||
341 | #endif /* _PPC64_UACCESS_H */ | ||
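access_ok() above reduces to ((segment.seg & (addr | size)) == 0), so for USER_DS any address or size with bits in the top nibble fails, while KERNEL_DS (seg == 0) accepts everything; get_user()/put_user() then expand to a single load or store backed by an exception-table fixup, and copy_from_user()/copy_to_user() handle arbitrary buffers. A hedged sketch of the usual calling pattern follows, written as a hypothetical ioctl-style handler; struct foo_args and foo_ioctl() are illustrative, not an existing kernel interface.

/* Hypothetical ioctl-style handler showing the usual pattern for the
 * primitives declared above.  struct foo_args and foo_ioctl() are
 * illustrative only.
 */
#include <linux/errno.h>
#include <asm/uaccess.h>

struct foo_args {
	unsigned long threshold;
	unsigned int flags;
};

static long foo_ioctl(struct foo_args __user *uarg)
{
	struct foo_args args;

	/* bulk transfer: copy_from_user() performs its own access_ok() check */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	args.flags |= 1;

	/* single value: put_user() picks the right store ("stw" for a u32) */
	if (put_user(args.flags, &uarg->flags))
		return -EFAULT;

	return 0;
}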
diff --git a/include/asm-ppc64/ucontext.h b/include/asm-ppc64/ucontext.h deleted file mode 100644 index ef8cc5b37542..000000000000 --- a/include/asm-ppc64/ucontext.h +++ /dev/null | |||
@@ -1,22 +0,0 @@ | |||
1 | #ifndef _ASMPPC64_UCONTEXT_H | ||
2 | #define _ASMPPC64_UCONTEXT_H | ||
3 | |||
4 | #include <asm/sigcontext.h> | ||
5 | |||
6 | /* | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | struct ucontext { | ||
14 | unsigned long uc_flags; | ||
15 | struct ucontext *uc_link; | ||
16 | stack_t uc_stack; | ||
17 | sigset_t uc_sigmask; | ||
18 | sigset_t __unsued[15]; /* Allow for uc_sigmask growth */ | ||
19 | struct sigcontext uc_mcontext; /* last for extensibility */ | ||
20 | }; | ||
21 | |||
22 | #endif /* _ASMPPC64_UCONTEXT_H */ | ||