Diffstat (limited to 'include/asm-s390/system.h')
 include/asm-s390/system.h | 462 ----------------------------------------------
 1 file changed, 0 insertions(+), 462 deletions(-)
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
deleted file mode 100644
index 819e7d99ca0c..000000000000
--- a/include/asm-s390/system.h
+++ /dev/null
@@ -1,462 +0,0 @@
/*
 * include/asm-s390/system.h
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 * Derived from "include/asm-i386/system.h"
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,8(%1)\n"
		"	std	2,24(%1)\n"
		"	std	4,40(%1)\n"
		"	std	6,56(%1)"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	0(%1)\n"
		"	std	1,16(%1)\n"
		"	std	3,32(%1)\n"
		"	std	5,48(%1)\n"
		"	std	7,64(%1)\n"
		"	std	8,72(%1)\n"
		"	std	9,80(%1)\n"
		"	std	10,88(%1)\n"
		"	std	11,96(%1)\n"
		"	std	12,104(%1)\n"
		"	std	13,112(%1)\n"
		"	std	14,120(%1)\n"
		"	std	15,128(%1)\n"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,8(%0)\n"
		"	ld	2,24(%0)\n"
		"	ld	4,40(%0)\n"
		"	ld	6,56(%0)"
		: : "a" (fpregs), "m" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	0(%0)\n"
		"	ld	1,16(%0)\n"
		"	ld	3,32(%0)\n"
		"	ld	5,48(%0)\n"
		"	ld	7,64(%0)\n"
		"	ld	8,72(%0)\n"
		"	ld	9,80(%0)\n"
		"	ld	10,88(%0)\n"
		"	ld	11,96(%0)\n"
		"	ld	12,104(%0)\n"
		"	ld	13,112(%0)\n"
		"	ld	14,120(%0)\n"
		"	ld	15,128(%0)\n"
		: : "a" (fpregs), "m" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
}

#define switch_to(prev,next,last) do {				\
	if (prev == next)					\
		break;						\
	save_fp_regs(&prev->thread.fp_regs);			\
	restore_fp_regs(&next->thread.fp_regs);			\
	save_access_regs(&prev->thread.acrs[0]);		\
	restore_access_regs(&next->thread.acrs[0]);		\
	prev = __switch_to(prev,next);				\
} while (0)

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#else
#define account_vtime(x) do { /* empty */ } while (0)
#endif

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()	do { } while (0)
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

#ifdef CONFIG_PAGE_STATES
extern void cmma_init(void);
#else
static inline void cmma_init(void) { }
#endif

#define finish_arch_switch(prev) do {				\
	set_fs(current->thread.mm_segment);			\
	account_vtime(prev);					\
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr,x)						\
({								\
	__typeof__(*(ptr)) __ret;				\
	__ret = (__typeof__(*(ptr)))				\
		__xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr)));\
	__ret;							\
})

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,0(%4)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,0(%4)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"	l	%0,0(%3)\n"
			"0:	cs	%0,%2,0(%3)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(int *) ptr)
			: "d" (x), "a" (ptr), "m" (*(int *) ptr)
			: "memory", "cc");
		return old;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	lg	%0,0(%3)\n"
			"0:	csg	%0,%2,0(%3)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "a" (ptr), "m" (*(long *) ptr)
			: "memory", "cc");
		return old;
#endif /* __s390x__ */
	}
	__xchg_called_with_bad_pointer();
	return x;
}
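
/*
 * Illustrative sketch, not part of the original header: xchg() atomically
 * stores a new value and returns the previous one, which is already enough
 * for a simple test-and-set lock. The function and variable names below
 * are hypothetical.
 */
static inline int example_test_and_set(int *lock)
{
	/* Returns the old value: 0 means the lock was free and is now ours. */
	return xchg(lock, 1);
}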

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG	1

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),\
				(unsigned long)(n), sizeof(*(ptr))))

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%2\n"
			"	or	%1,%3\n"
			"	cs	%0,%1,0(%4)\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(255 << shift))
			: "memory", "cc");
		return prev >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%2\n"
			"	or	%1,%3\n"
			"	cs	%0,%1,0(%4)\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(65535 << shift))
			: "memory", "cc");
		return prev >> shift;
	case 4:
		asm volatile(
			"	cs	%0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc");
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	csg	%0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc");
		return prev;
#endif /* __s390x__ */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
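
/*
 * Illustrative sketch, not part of the original header: the usual
 * compare-and-swap retry loop built on cmpxchg(). Success is detected by
 * comparing the returned value with the expected old value, as the comment
 * above __cmpxchg describes. The function name is hypothetical.
 */
static inline void example_atomic_inc(unsigned int *counter)
{
	unsigned int old;

	do {
		old = *counter;
		/* cmpxchg() returns the prior contents; retry if it changed. */
	} while (cmpxchg(counter, old, old + 1) != old);
}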

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()


#define set_mb(var, value)	do { var = value; mb(); } while (0)
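
/*
 * Illustrative sketch, not part of the original header: the classic
 * producer/consumer pairing of wmb() and rmb(). "data" and "ready" are
 * hypothetical shared locations.
 */
static inline void example_publish(int *data, int *ready)
{
	*data = 42;
	wmb();		/* order the data store before the flag store */
	*ready = 1;
}

static inline int example_consume(int *data, int *ready)
{
	if (!*ready)
		return -1;
	rmb();		/* order the flag read before the data read */
	return *data;
}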

#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,0(%0)\n"			\
		: : "a" (&array), "i" (low), "i" (high),	\
		    "m" (*(addrtype *)(&array)));		\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%2,%3,0(%1)\n"			\
		: "=m" (*(addrtype *)(&array))			\
		: "a" (&array), "i" (low), "i" (high));		\
})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,0(%0)\n"			\
		: : "a" (&array), "i" (low), "i" (high),	\
		    "m" (*(addrtype *)(&array)));		\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%2,%3,0(%1)\n"			\
		: "=m" (*(addrtype *)(&array))			\
		: "a" (&array), "i" (low), "i" (high));		\
})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})

#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				unsigned long old,
				unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef __s390x__
	case 8:
#endif
		return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
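
/*
 * Illustrative sketch, not part of the original header: cmpxchg_local()
 * is suitable for data that only the local CPU ever modifies, e.g. a
 * hypothetical per-CPU event counter, since it need not be atomic against
 * other CPUs.
 */
static inline unsigned long example_local_event_inc(unsigned long *cnt)
{
	unsigned long old;

	do {
		old = *cnt;
	} while (cmpxchg_local(cnt, old, old + 1) != old);
	return old + 1;
}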

/*
 * Set the psw mask, except for the first byte, which this
 * function leaves unchanged.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)

int stfle(unsigned long long *list, int doublewords);

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

static inline unsigned int stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b,0b));
	return S390_lowcore.stfl_fac_list;
}

static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#ifdef CONFIG_TRACE_IRQFLAGS
extern psw_t sysc_restore_trace_psw;
extern psw_t io_restore_trace_psw;
#endif

#endif /* __KERNEL__ */

#endif