author     Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-s390/system.h
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-s390/system.h')
-rw-r--r--  include/asm-s390/system.h  477
1 file changed, 477 insertions(+), 0 deletions(-)

diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
new file mode 100644
index 000000000000..81514d76edcf
--- /dev/null
+++ b/include/asm-s390/system.h
@@ -0,0 +1,477 @@
/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile (
                "   std   0,8(%1)\n"
                "   std   2,24(%1)\n"
                "   std   4,40(%1)\n"
                "   std   6,56(%1)"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "   stfpc 0(%1)\n"
                "   std   1,16(%1)\n"
                "   std   3,32(%1)\n"
                "   std   5,48(%1)\n"
                "   std   7,64(%1)\n"
                "   std   8,72(%1)\n"
                "   std   9,80(%1)\n"
                "   std   10,88(%1)\n"
                "   std   11,96(%1)\n"
                "   std   12,104(%1)\n"
                "   std   13,112(%1)\n"
                "   std   14,120(%1)\n"
                "   std   15,128(%1)\n"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile (
                "   ld    0,8(%0)\n"
                "   ld    2,24(%0)\n"
                "   ld    4,40(%0)\n"
                "   ld    6,56(%0)"
                : : "a" (fpregs), "m" (*fpregs) );
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "   lfpc  0(%0)\n"
                "   ld    1,16(%0)\n"
                "   ld    3,32(%0)\n"
                "   ld    5,48(%0)\n"
                "   ld    7,64(%0)\n"
                "   ld    8,72(%0)\n"
                "   ld    9,80(%0)\n"
                "   ld    10,88(%0)\n"
                "   ld    11,96(%0)\n"
                "   ld    12,104(%0)\n"
                "   ld    13,112(%0)\n"
                "   ld    14,120(%0)\n"
                "   ld    15,128(%0)\n"
                : : "a" (fpregs), "m" (*fpregs) );
}

static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" );
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) );
}

#define switch_to(prev,next,last) do { \
        if (prev == next) \
                break; \
        save_fp_regs(&prev->thread.fp_regs); \
        restore_fp_regs(&next->thread.fp_regs); \
        save_access_regs(&prev->thread.acrs[0]); \
        restore_access_regs(&next->thread.acrs[0]); \
        prev = __switch_to(prev,next); \
} while (0)

#define prepare_arch_switch(rq, next)   do { } while(0)
#define task_running(rq, p)             ((rq)->curr == (p))

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_user_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);

#define finish_arch_switch(rq, prev) do { \
        set_fs(current->thread.mm_segment); \
        spin_unlock(&(rq)->lock); \
        account_system_vtime(prev); \
        local_irq_enable(); \
} while (0)

#else

#define finish_arch_switch(rq, prev) do { \
        set_fs(current->thread.mm_segment); \
        spin_unlock_irq(&(rq)->lock); \
} while (0)

#endif

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) \
  ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        unsigned long addr, old;
        int shift;

        switch (size) {
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  lr  0,%0\n"
                        "    nr  0,%3\n"
                        "    or  0,%2\n"
                        "    cs  %0,0,0(%4)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0" );
                x = old >> shift;
                break;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  lr  0,%0\n"
                        "    nr  0,%3\n"
                        "    or  0,%2\n"
                        "    cs  %0,0,0(%4)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0" );
                x = old >> shift;
                break;
        case 4:
                asm volatile (
                        "    l   %0,0(%3)\n"
                        "0:  cs  %0,%2,0(%3)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(int *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(int *) ptr)
                        : "memory", "cc" );
                x = old;
                break;
#ifdef __s390x__
        case 8:
                asm volatile (
                        "    lg  %0,0(%3)\n"
                        "0:  csg %0,%2,0(%3)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(long *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(long *) ptr)
                        : "memory", "cc" );
                x = old;
                break;
#endif /* __s390x__ */
        }
        return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
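/*
 * Usage sketch (illustrative only, with a hypothetical 'counter'
 * variable): a caller can build a lock-free read-modify-write on top
 * of cmpxchg() by retrying until the value returned matches the old
 * value it expected.
 *
 *	unsigned int old, prev;
 *
 *	do {
 *		old = *counter;
 *		prev = cmpxchg(counter, old, old + 1);
 *	} while (prev != old);		-- another CPU won the race, retry
 */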

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n) \
  ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
                                 (unsigned long)(n),sizeof(*(ptr))))

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        unsigned long addr, prev, tmp;
        int shift;

        switch (size) {
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  nr  %0,%5\n"
                        "    lr  %1,%0\n"
                        "    or  %0,%2\n"
                        "    or  %1,%3\n"
                        "    cs  %0,%1,0(%4)\n"
                        "    jnl 1f\n"
                        "    xr  %1,%0\n"
                        "    nr  %1,%5\n"
                        "    jnz 0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(255 << shift))
                        : "memory", "cc" );
                return prev >> shift;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  nr  %0,%5\n"
                        "    lr  %1,%0\n"
                        "    or  %0,%2\n"
                        "    or  %1,%3\n"
                        "    cs  %0,%1,0(%4)\n"
                        "    jnl 1f\n"
                        "    xr  %1,%0\n"
                        "    nr  %1,%5\n"
                        "    jnz 0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(65535 << shift))
                        : "memory", "cc" );
                return prev >> shift;
        case 4:
                asm volatile (
                        "    cs  %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc" );
                return prev;
#ifdef __s390x__
        case 8:
                asm volatile (
                        "    csg %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc" );
                return prev;
#endif /* __s390x__ */
        }
        return old;
}

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()  __asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
# define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()


#define set_mb(var, value)      do { var = value; mb(); } while (0)
#define set_wmb(var, value)     do { var = value; wmb(); } while (0)
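/*
 * Usage sketch (illustrative only, with hypothetical 'data' and 'flag'
 * variables): smp_wmb() orders the data store before the flag store on
 * the producer side, and smp_rmb() orders the flag load before the data
 * load on the consumer side, so the consumer never sees the flag without
 * the data.
 *
 *	producer:	data = value; smp_wmb(); flag = 1;
 *	consumer:	if (flag) { smp_rmb(); val = data; }
 */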

/* interrupt control.. */
#define local_irq_enable() ({ \
        unsigned long __dummy; \
        __asm__ __volatile__ ( \
                "stosm 0(%1),0x03" \
                : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
        })

#define local_irq_disable() ({ \
        unsigned long __flags; \
        __asm__ __volatile__ ( \
                "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
        __flags; \
        })

#define local_save_flags(x) \
        __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )

#define local_irq_restore(x) \
        __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory")

#define irqs_disabled() \
({ \
        unsigned long flags; \
        local_save_flags(flags); \
        !((flags >> __FLAG_SHIFT) & 3); \
})
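/*
 * Usage sketch (illustrative only): the usual pattern is to save and
 * disable interrupts around a short critical section and then restore
 * the previous mask, so the code also behaves correctly when it is
 * entered with interrupts already disabled.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch per-cpu or interrupt-shared state ...
 *	local_irq_restore(flags);
 */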

#ifdef __s390x__

#define __load_psw(psw) \
        __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );

#define __ctl_load(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   lctlg 0,0,0(%0)\n" \
                "0: ex    %1,0(1)" \
                : : "a" (&array), "a" (((low)<<4)+(high)), \
                    "m" (*(addrtype *)(array)) : "1" ); \
        })

#define __ctl_store(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   stctg 0,0,0(%1)\n" \
                "0: ex    %2,0(1)" \
                : "=m" (*(addrtype *)(array)) \
                : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
        })

#define __ctl_set_bit(cr, bit) ({ \
        __u8 __dummy[24]; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n"      /* skip indirect insns */ \
                "   stctg 0,0,0(%1)\n" \
                "   lctlg 0,0,0(%1)\n" \
                "0: ex    %2,0(1)\n"   /* execute stctl */ \
                "   lg    0,0(%1)\n" \
                "   ogr   0,%3\n"      /* set the bit */ \
                "   stg   0,0(%1)\n" \
                "1: ex    %2,6(1)"     /* execute lctl */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (1L<<(bit)) \
                : "cc", "0", "1" ); \
        })

#define __ctl_clear_bit(cr, bit) ({ \
        __u8 __dummy[16]; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n"      /* skip indirect insns */ \
                "   stctg 0,0,0(%1)\n" \
                "   lctlg 0,0,0(%1)\n" \
                "0: ex    %2,0(1)\n"   /* execute stctl */ \
                "   lg    0,0(%1)\n" \
                "   ngr   0,%3\n"      /* clear the bit */ \
                "   stg   0,0(%1)\n" \
                "1: ex    %2,6(1)"     /* execute lctl */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (~(1L<<(bit))) \
                : "cc", "0", "1" ); \
        })

#else /* __s390x__ */

#define __load_psw(psw) \
        __asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );

#define __ctl_load(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   lctl  0,0,0(%0)\n" \
                "0: ex    %1,0(1)" \
                : : "a" (&array), "a" (((low)<<4)+(high)), \
                    "m" (*(addrtype *)(array)) : "1" ); \
        })

#define __ctl_store(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   stctl 0,0,0(%1)\n" \
                "0: ex    %2,0(1)" \
                : "=m" (*(addrtype *)(array)) \
                : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
        })

#define __ctl_set_bit(cr, bit) ({ \
        __u8 __dummy[16]; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n"      /* skip indirect insns */ \
                "   stctl 0,0,0(%1)\n" \
                "   lctl  0,0,0(%1)\n" \
                "0: ex    %2,0(1)\n"   /* execute stctl */ \
                "   l     0,0(%1)\n" \
                "   or    0,%3\n"      /* set the bit */ \
                "   st    0,0(%1)\n" \
                "1: ex    %2,4(1)"     /* execute lctl */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (1<<(bit)) \
                : "cc", "0", "1" ); \
        })

#define __ctl_clear_bit(cr, bit) ({ \
        __u8 __dummy[16]; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n"      /* skip indirect insns */ \
                "   stctl 0,0,0(%1)\n" \
                "   lctl  0,0,0(%1)\n" \
                "0: ex    %2,0(1)\n"   /* execute stctl */ \
                "   l     0,0(%1)\n" \
                "   nr    0,%3\n"      /* clear the bit */ \
                "   st    0,0(%1)\n" \
                "1: ex    %2,4(1)"     /* execute lctl */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (~(1<<(bit))) \
                : "cc", "0", "1" ); \
        })
#endif /* __s390x__ */

/* For spinlocks etc */
#define local_irq_save(x)       ((x) = local_irq_disable())

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif