author     Heiko Carstens <heiko.carstens@de.ibm.com>  2005-06-25 17:55:30 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-25 19:24:37 -0400
commit     77fa22450de00d535de2cc8be653983560828000 (patch)
tree       61644edb2263c3d0db3ea9e9518c6f76a60039e0  /include/asm-s390/processor.h
parent     f901e5d1e06b3326c100c5d0df43656311befb81 (diff)
[PATCH] s390: improved machine check handling
Improved machine check handling. The kernel is now able to receive machine checks while in kernel mode (system call, interrupt and program check handling). In addition, register validation is now performed.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-s390/processor.h')
-rw-r--r--  include/asm-s390/processor.h  52
1 file changed, 20 insertions(+), 32 deletions(-)
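
For readers who prefer the resulting code to the raw diff, the sketch below consolidates the PSW-loading helpers as they stand after this patch. It is a simplified excerpt for illustration only, not the complete header; psw_t, __load_psw_mask() and the PSW_* constants come from the existing s390 headers.

/* New helper introduced by this patch: load a complete PSW (mask + address). */
static inline void __load_psw(psw_t psw)
{
#ifndef __s390x__
        asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
#else
        asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
#endif
}

/* enabled_wait() now just builds the wait PSW mask and defers to __load_psw_mask(). */
static inline void enabled_wait(void)
{
        __load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
                        PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY);
}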
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index fb46e9090b50..8bd14de69e35 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -207,6 +207,18 @@ unsigned long get_wchan(struct task_struct *p);
 #endif /* __s390x__ */
 
 /*
+ * Set PSW to specified value.
+ */
+static inline void __load_psw(psw_t psw)
+{
+#ifndef __s390x__
+        asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
+#else
+        asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
+#endif
+}
+
+/*
  * Set PSW mask to specified value, while leaving the
  * PSW addr pointing to the next instruction.
  */
@@ -214,8 +226,8 @@ unsigned long get_wchan(struct task_struct *p);
 static inline void __load_psw_mask (unsigned long mask)
 {
         unsigned long addr;
-
         psw_t psw;
+
         psw.mask = mask;
 
 #ifndef __s390x__
@@ -241,30 +253,8 @@ static inline void __load_psw_mask (unsigned long mask)
  */
 static inline void enabled_wait(void)
 {
-        unsigned long reg;
-        psw_t wait_psw;
-
-        wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
-                PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY;
-#ifndef __s390x__
-        asm volatile (
-                " basr %0,0\n"
-                "0: la %0,1f-0b(%0)\n"
-                " st %0,4(%1)\n"
-                " oi 4(%1),0x80\n"
-                " lpsw 0(%1)\n"
-                "1:"
-                : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
-                : "memory", "cc" );
-#else /* __s390x__ */
-        asm volatile (
-                " larl %0,0f\n"
-                " stg %0,8(%1)\n"
-                " lpswe 0(%1)\n"
-                "0:"
-                : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
-                : "memory", "cc" );
-#endif /* __s390x__ */
+        __load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
+                        PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY);
 }
 
 /*
@@ -273,13 +263,11 @@ static inline void enabled_wait(void)
 
 static inline void disabled_wait(unsigned long code)
 {
-        char psw_buffer[2*sizeof(psw_t)];
         unsigned long ctl_buf;
-        psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1)
-                        & -sizeof(psw_t));
+        psw_t dw_psw;
 
-        dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT;
-        dw_psw->addr = code;
+        dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+        dw_psw.addr = code;
         /*
          * Store status and then load disabled wait psw,
          * the processor is dead afterwards
@@ -301,7 +289,7 @@ static inline void disabled_wait(unsigned long code)
               " oi 0x1c0,0x10\n" /* fake protection bit */
               " lpsw 0(%1)"
               : "=m" (ctl_buf)
-              : "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" );
+              : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" );
 #else /* __s390x__ */
         asm volatile (" stctg 0,0,0(%2)\n"
               " ni 4(%2),0xef\n" /* switch off protection */
@@ -333,7 +321,7 @@ static inline void disabled_wait(unsigned long code)
               " oi 0x384(1),0x10\n" /* fake protection bit */
               " lpswe 0(%1)"
               : "=m" (ctl_buf)
-              : "a" (dw_psw), "a" (&ctl_buf),
+              : "a" (&dw_psw), "a" (&ctl_buf),
                 "m" (dw_psw) : "cc", "0", "1");
 #endif /* __s390x__ */
 }
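
Usage note (an illustrative sketch, not part of this patch): disabled_wait() stores its code argument into the address half of the wait PSW (dw_psw.addr = code), so callers conventionally pass a value that identifies where the CPU stopped, for example the caller's own address. The fatal_stop() wrapper below is hypothetical and only shows that calling convention.

/* Hypothetical caller, for illustration only. */
static void fatal_stop(void)
{
        /* The return address ends up in the wait PSW, so the stop point
         * is visible when the stopped CPU's PSW is inspected later. */
        disabled_wait((unsigned long) __builtin_return_address(0));
        /* Not reached: PSW_MASK_WAIT is set and no interrupts are enabled. */
}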