author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 21:15:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 21:15:32 -0400
commit     db1417967959569599da2a4bd0ffb93b17ad795f (patch)
tree       08751414d5f4a9e264af924154ed3543a8e573a9
parent     48aab2f79dfc1357c48ce22ff5c989b52a590069 (diff)
parent     c6da39f26cfe475704ec521723192e520e8f51b8 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 patches from Martin Schwidefsky:
 "The biggest patch is the rework of the smp code, something I wanted
  to do for some time.  There are some patches for our various dump
  methods and one new thing: z/VM LGR detection.  LGR stands for
  linux-guest-relocation and is the guest migration feature of z/VM.
  For debugging purposes we keep a log of the systems where a specific
  guest has lived."

Fix up trivial conflict in arch/s390/kernel/smp.c due to the scheduler
cleanup having removed some code next to removed s390 code.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  [S390] kernel: Pass correct stack for smp_call_ipl_cpu()
  [S390] Ensure that vmcore_info pointer is never accessed directly
  [S390] dasd: prevent validate server for offline devices
  [S390] Remove monolithic build option for zcrypt driver.
  [S390] stack dump: fix indentation in output
  [S390] kernel: Add OS info memory interface
  [S390] Use block_sigmask()
  [S390] kernel: Add z/VM LGR detection
  [S390] irq: external interrupt code passing
  [S390] irq: set __ARCH_IRQ_EXIT_IRQS_DISABLED
  [S390] zfcpdump: Implement async sdias event processing
  [S390] Use copy_to_absolute_zero() instead of "stura/sturg"
  [S390] rework idle code
  [S390] rework smp code
  [S390] rename lowcore field
  [S390] Fix gcc 4.6.0 compile warning
-rw-r--r--  arch/s390/include/asm/cputime.h        9
-rw-r--r--  arch/s390/include/asm/debug.h          1
-rw-r--r--  arch/s390/include/asm/hardirq.h        1
-rw-r--r--  arch/s390/include/asm/ipl.h            1
-rw-r--r--  arch/s390/include/asm/irq.h            7
-rw-r--r--  arch/s390/include/asm/lowcore.h        119
-rw-r--r--  arch/s390/include/asm/os_info.h        50
-rw-r--r--  arch/s390/include/asm/sigp.h           132
-rw-r--r--  arch/s390/include/asm/smp.h            63
-rw-r--r--  arch/s390/include/asm/system.h         34
-rw-r--r--  arch/s390/include/asm/timer.h          4
-rw-r--r--  arch/s390/include/asm/vdso.h           4
-rw-r--r--  arch/s390/kernel/Makefile              4
-rw-r--r--  arch/s390/kernel/asm-offsets.c         27
-rw-r--r--  arch/s390/kernel/compat_signal.c       6
-rw-r--r--  arch/s390/kernel/crash_dump.c          37
-rw-r--r--  arch/s390/kernel/debug.c               40
-rw-r--r--  arch/s390/kernel/early.c               22
-rw-r--r--  arch/s390/kernel/entry.S               159
-rw-r--r--  arch/s390/kernel/entry.h               17
-rw-r--r--  arch/s390/kernel/entry64.S             139
-rw-r--r--  arch/s390/kernel/ipl.c                 99
-rw-r--r--  arch/s390/kernel/irq.c                 14
-rw-r--r--  arch/s390/kernel/lgr.c                 200
-rw-r--r--  arch/s390/kernel/machine_kexec.c       52
-rw-r--r--  arch/s390/kernel/nmi.c                 2
-rw-r--r--  arch/s390/kernel/os_info.c             169
-rw-r--r--  arch/s390/kernel/process.c             7
-rw-r--r--  arch/s390/kernel/setup.c               61
-rw-r--r--  arch/s390/kernel/signal.c              6
-rw-r--r--  arch/s390/kernel/smp.c                 1147
-rw-r--r--  arch/s390/kernel/switch_cpu.S          58
-rw-r--r--  arch/s390/kernel/switch_cpu64.S        51
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S        19
-rw-r--r--  arch/s390/kernel/time.c                4
-rw-r--r--  arch/s390/kernel/topology.c            8
-rw-r--r--  arch/s390/kernel/traps.c               6
-rw-r--r--  arch/s390/kernel/vdso.c                28
-rw-r--r--  arch/s390/kernel/vtime.c               168
-rw-r--r--  arch/s390/kvm/interrupt.c              6
-rw-r--r--  arch/s390/lib/delay.c                  31
-rw-r--r--  arch/s390/lib/spinlock.c               30
-rw-r--r--  arch/s390/mm/fault.c                   4
-rw-r--r--  arch/s390/oprofile/hwsampler.c         6
-rw-r--r--  drivers/crypto/Kconfig                 9
-rw-r--r--  drivers/s390/block/dasd.c              4
-rw-r--r--  drivers/s390/block/dasd_diag.c         8
-rw-r--r--  drivers/s390/block/dasd_eckd.c         8
-rw-r--r--  drivers/s390/char/sclp.c               4
-rw-r--r--  drivers/s390/char/sclp_quiesce.c       1
-rw-r--r--  drivers/s390/char/sclp_sdias.c         101
-rw-r--r--  drivers/s390/char/zcore.c              1
-rw-r--r--  drivers/s390/cio/cio.c                 2
-rw-r--r--  drivers/s390/cio/qdio_main.c           6
-rw-r--r--  drivers/s390/crypto/Makefile           10
-rw-r--r--  drivers/s390/crypto/ap_bus.c           2
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c       2
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c     4
-rw-r--r--  drivers/s390/crypto/zcrypt_mono.c      100
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c     4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c     4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c    4
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c          6
-rw-r--r--  net/iucv/iucv.c                        2
64 files changed, 1701 insertions, 1633 deletions
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index c23c3900c304..24ef186a1c4f 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -170,24 +170,17 @@ struct s390_idle_data {
 	unsigned int sequence;
 	unsigned long long idle_count;
 	unsigned long long idle_enter;
+	unsigned long long idle_exit;
 	unsigned long long idle_time;
 	int nohz_delay;
 };
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
-void vtime_start_cpu(__u64 int_clock, __u64 enter_timer);
 cputime64_t s390_get_idle_time(int cpu);
 
 #define arch_idle_time(cpu) s390_get_idle_time(cpu)
 
-static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
-				   __u64 enter_timer)
-{
-	if (regs->psw.mask & PSW_MASK_WAIT)
-		vtime_start_cpu(int_clock, enter_timer);
-}
-
 static inline int s390_nohz_delay(int cpu)
 {
 	return __get_cpu_var(s390_idle).nohz_delay != 0;
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 9d88db1f55d0..8a8245ed14d2 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -131,6 +131,7 @@ void debug_unregister(debug_info_t* id);
 
 void debug_set_level(debug_info_t* id, int new_level);
 
+void debug_set_critical(void);
 void debug_stop_all(void);
 
 static inline debug_entry_t*
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index e4155d3eb2cb..510ba9ef4248 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -18,6 +18,7 @@
 
 #define __ARCH_IRQ_STAT
 #define __ARCH_HAS_DO_SOFTIRQ
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED
 
 #define HARDIRQ_BITS	8
 
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 6940abfbe1d9..2bd6cb897b90 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -169,5 +169,6 @@ enum diag308_rc {
 extern int diag308(unsigned long subcode, void *addr);
 extern void diag308_reset(void);
 extern void store_status(void);
+extern void lgr_info_log(void);
 
 #endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index ba6d85f88d50..acee1806f61e 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -34,7 +34,12 @@ enum interruption_class {
 	NR_IRQS,
 };
 
-typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
+struct ext_code {
+	unsigned short subcode;
+	unsigned short code;
+};
+
+typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
 
 int register_external_interrupt(u16 code, ext_int_handler_t handler);
 int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
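
The hunk above replaces the packed cpu-address/interruption-code word with a struct ext_code that every handler now receives. A minimal sketch of a handler written against the new type; the handler name, its body, and the registration call site are illustrative assumptions, not code from this merge:

	/* Illustrative only: a handler matching the new ext_int_handler_t. */
	static void example_ext_handler(struct ext_code ext_code,
					unsigned int param32, unsigned long param64)
	{
		/* ext_code.code is the 16-bit external interrupt code;
		 * ext_code.subcode is the other half of the word that used to
		 * arrive packed into one unsigned int (the lowcore field
		 * renamed cpu_addr -> ext_cpu_addr in this merge). */
		if (ext_code.subcode != 0)
			return;	/* hypothetical subcode filter */
	}

	/* Registration itself is unchanged:
	 *	register_external_interrupt(code, example_ext_handler);
	 */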
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 707f2306725b..47853debb3b9 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999,2012
  * Author(s): Hartmut Penner <hp@de.ibm.com>,
  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Denis Joseph Barrow,
@@ -12,14 +12,6 @@
 #include <asm/ptrace.h>
 #include <asm/cpu.h>
 
-void restart_int_handler(void);
-void ext_int_handler(void);
-void system_call(void);
-void pgm_check_handler(void);
-void mcck_int_handler(void);
-void io_int_handler(void);
-void psw_restart_int_handler(void);
-
 #ifdef CONFIG_32BIT
 
 #define LC_ORDER 0
@@ -56,7 +48,7 @@ struct _lowcore {
 	psw_t	mcck_new_psw;			/* 0x0070 */
 	psw_t	io_new_psw;			/* 0x0078 */
 	__u32	ext_params;			/* 0x0080 */
-	__u16	cpu_addr;			/* 0x0084 */
+	__u16	ext_cpu_addr;			/* 0x0084 */
 	__u16	ext_int_code;			/* 0x0086 */
 	__u16	svc_ilc;			/* 0x0088 */
 	__u16	svc_code;			/* 0x008a */
@@ -117,32 +109,37 @@ struct _lowcore {
 	__u64	steal_timer;			/* 0x0288 */
 	__u64	last_update_timer;		/* 0x0290 */
 	__u64	last_update_clock;		/* 0x0298 */
+	__u64	int_clock;			/* 0x02a0 */
+	__u64	mcck_clock;			/* 0x02a8 */
+	__u64	clock_comparator;		/* 0x02b0 */
 
 	/* Current process. */
-	__u32	current_task;			/* 0x02a0 */
-	__u32	thread_info;			/* 0x02a4 */
-	__u32	kernel_stack;			/* 0x02a8 */
+	__u32	current_task;			/* 0x02b8 */
+	__u32	thread_info;			/* 0x02bc */
+	__u32	kernel_stack;			/* 0x02c0 */
+
+	/* Interrupt, panic and restart stack. */
+	__u32	async_stack;			/* 0x02c4 */
+	__u32	panic_stack;			/* 0x02c8 */
+	__u32	restart_stack;			/* 0x02cc */
 
-	/* Interrupt and panic stack. */
-	__u32	async_stack;			/* 0x02ac */
-	__u32	panic_stack;			/* 0x02b0 */
+	/* Restart function and parameter. */
+	__u32	restart_fn;			/* 0x02d0 */
+	__u32	restart_data;			/* 0x02d4 */
+	__u32	restart_source;			/* 0x02d8 */
 
 	/* Address space pointer. */
-	__u32	kernel_asce;			/* 0x02b4 */
-	__u32	user_asce;			/* 0x02b8 */
-	__u32	current_pid;			/* 0x02bc */
+	__u32	kernel_asce;			/* 0x02dc */
+	__u32	user_asce;			/* 0x02e0 */
+	__u32	current_pid;			/* 0x02e4 */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x02c0 */
-	__u32	softirq_pending;		/* 0x02c4 */
-	__u32	percpu_offset;			/* 0x02c8 */
-	__u32	ext_call_fast;			/* 0x02cc */
-	__u64	int_clock;			/* 0x02d0 */
-	__u64	mcck_clock;			/* 0x02d8 */
-	__u64	clock_comparator;		/* 0x02e0 */
-	__u32	machine_flags;			/* 0x02e8 */
-	__u32	ftrace_func;			/* 0x02ec */
-	__u8	pad_0x02f8[0x0300-0x02f0];	/* 0x02f0 */
+	__u32	cpu_nr;				/* 0x02e8 */
+	__u32	softirq_pending;		/* 0x02ec */
+	__u32	percpu_offset;			/* 0x02f0 */
+	__u32	machine_flags;			/* 0x02f4 */
+	__u32	ftrace_func;			/* 0x02f8 */
+	__u8	pad_0x02fc[0x0300-0x02fc];	/* 0x02fc */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -157,7 +154,9 @@ struct _lowcore {
 	__u32	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e04 */
 	__u32	vmcore_info;			/* 0x0e08 */
-	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
+	__u8	pad_0x0e0c[0x0e18-0x0e0c];	/* 0x0e0c */
+	__u32	os_info;			/* 0x0e18 */
+	__u8	pad_0x0e1c[0x0f00-0x0e1c];	/* 0x0e1c */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */
@@ -189,7 +188,7 @@ struct _lowcore {
 	__u32	ipl_parmblock_ptr;		/* 0x0014 */
 	__u8	pad_0x0018[0x0080-0x0018];	/* 0x0018 */
 	__u32	ext_params;			/* 0x0080 */
-	__u16	cpu_addr;			/* 0x0084 */
+	__u16	ext_cpu_addr;			/* 0x0084 */
 	__u16	ext_int_code;			/* 0x0086 */
 	__u16	svc_ilc;			/* 0x0088 */
 	__u16	svc_code;			/* 0x008a */
@@ -254,34 +253,39 @@ struct _lowcore {
 	__u64	steal_timer;			/* 0x02e0 */
 	__u64	last_update_timer;		/* 0x02e8 */
 	__u64	last_update_clock;		/* 0x02f0 */
+	__u64	int_clock;			/* 0x02f8 */
+	__u64	mcck_clock;			/* 0x0300 */
+	__u64	clock_comparator;		/* 0x0308 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x02f8 */
-	__u64	thread_info;			/* 0x0300 */
-	__u64	kernel_stack;			/* 0x0308 */
+	__u64	current_task;			/* 0x0310 */
+	__u64	thread_info;			/* 0x0318 */
+	__u64	kernel_stack;			/* 0x0320 */
+
+	/* Interrupt, panic and restart stack. */
+	__u64	async_stack;			/* 0x0328 */
+	__u64	panic_stack;			/* 0x0330 */
+	__u64	restart_stack;			/* 0x0338 */
 
-	/* Interrupt and panic stack. */
-	__u64	async_stack;			/* 0x0310 */
-	__u64	panic_stack;			/* 0x0318 */
+	/* Restart function and parameter. */
+	__u64	restart_fn;			/* 0x0340 */
+	__u64	restart_data;			/* 0x0348 */
+	__u64	restart_source;			/* 0x0350 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0320 */
-	__u64	user_asce;			/* 0x0328 */
-	__u64	current_pid;			/* 0x0330 */
+	__u64	kernel_asce;			/* 0x0358 */
+	__u64	user_asce;			/* 0x0360 */
+	__u64	current_pid;			/* 0x0368 */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0338 */
-	__u32	softirq_pending;		/* 0x033c */
-	__u64	percpu_offset;			/* 0x0340 */
-	__u64	ext_call_fast;			/* 0x0348 */
-	__u64	int_clock;			/* 0x0350 */
-	__u64	mcck_clock;			/* 0x0358 */
-	__u64	clock_comparator;		/* 0x0360 */
-	__u64	vdso_per_cpu_data;		/* 0x0368 */
-	__u64	machine_flags;			/* 0x0370 */
-	__u64	ftrace_func;			/* 0x0378 */
-	__u64	gmap;				/* 0x0380 */
-	__u8	pad_0x0388[0x0400-0x0388];	/* 0x0388 */
+	__u32	cpu_nr;				/* 0x0370 */
+	__u32	softirq_pending;		/* 0x0374 */
+	__u64	percpu_offset;			/* 0x0378 */
+	__u64	vdso_per_cpu_data;		/* 0x0380 */
+	__u64	machine_flags;			/* 0x0388 */
+	__u64	ftrace_func;			/* 0x0390 */
+	__u64	gmap;				/* 0x0398 */
+	__u8	pad_0x03a0[0x0400-0x03a0];	/* 0x03a0 */
 
 	/* Interrupt response block. */
 	__u8	irb[64];			/* 0x0400 */
@@ -298,8 +302,15 @@ struct _lowcore {
  */
 	__u64	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e08 */
-	__u64	vmcore_info;			/* 0x0e0c */
-	__u8	pad_0x0e14[0x0f00-0x0e14];	/* 0x0e14 */
+	/*
+	 * Because the vmcore_info pointer is not 8 byte aligned it should
+	 * never be accessed directly. For accessing the pointer, first
+	 * copy it to a local pointer variable.
+	 */
+	__u8	vmcore_info[8];			/* 0x0e0c */
+	__u8	pad_0x0e14[0x0e18-0x0e14];	/* 0x0e14 */
+	__u64	os_info;			/* 0x0e18 */
+	__u8	pad_0x0e20[0x0f00-0x0e20];	/* 0x0e20 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
new file mode 100644
index 000000000000..d07518af09ea
--- /dev/null
+++ b/arch/s390/include/asm/os_info.h
@@ -0,0 +1,50 @@
+/*
+ * OS info memory interface
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+#ifndef _ASM_S390_OS_INFO_H
+#define _ASM_S390_OS_INFO_H
+
+#define OS_INFO_VERSION_MAJOR	1
+#define OS_INFO_VERSION_MINOR	1
+#define OS_INFO_MAGIC		0x4f53494e464f535aULL /* OSINFOSZ */
+
+#define OS_INFO_VMCOREINFO	0
+#define OS_INFO_REIPL_BLOCK	1
+#define OS_INFO_INIT_FN		2
+
+struct os_info_entry {
+	u64	addr;
+	u64	size;
+	u32	csum;
+} __packed;
+
+struct os_info {
+	u64	magic;
+	u32	csum;
+	u16	version_major;
+	u16	version_minor;
+	u64	crashkernel_addr;
+	u64	crashkernel_size;
+	struct os_info_entry entry[3];
+	u8	reserved[4004];
+} __packed;
+
+void os_info_init(void);
+void os_info_entry_add(int nr, void *ptr, u64 len);
+void os_info_crashkernel_add(unsigned long base, unsigned long size);
+u32 os_info_csum(struct os_info *os_info);
+
+#ifdef CONFIG_CRASH_DUMP
+void *os_info_old_entry(int nr, unsigned long *size);
+int copy_from_oldmem(void *dest, void *src, size_t count);
+#else
+static inline void *os_info_old_entry(int nr, unsigned long *size)
+{
+	return NULL;
+}
+#endif
+
+#endif /* _ASM_S390_OS_INFO_H */
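
The new header above defines the checksummed, versioned handover block that a crashing kernel leaves behind for the dump kernel. A hedged usage sketch; the caller, payload, and comments below are illustrative assumptions, the real producers and consumers live in the os_info.c, ipl.c, and crash_dump.c changes elsewhere in this merge:

	/* Illustrative only: publish a buffer so the next (dump) kernel
	 * can locate it via lowcore->os_info. */
	static u64 example_payload[64];		/* hypothetical data */

	static void example_publish(void)
	{
		os_info_init();			/* set up magic/version (see os_info.c) */
		os_info_entry_add(OS_INFO_REIPL_BLOCK, example_payload,
				  sizeof(example_payload));
	}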
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
deleted file mode 100644
index 7040b8567cd0..000000000000
--- a/arch/s390/include/asm/sigp.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Routines and structures for signalling other processors.
- *
- * Copyright IBM Corp. 1999,2010
- * Author(s): Denis Joseph Barrow,
- *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
- *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
- */
-
-#ifndef __ASM_SIGP_H
-#define __ASM_SIGP_H
-
-#include <asm/system.h>
-
-/* Get real cpu address from logical cpu number. */
-extern unsigned short __cpu_logical_map[];
-
-static inline int cpu_logical_map(int cpu)
-{
-#ifdef CONFIG_SMP
-	return __cpu_logical_map[cpu];
-#else
-	return stap();
-#endif
-}
-
-enum {
-	sigp_sense = 1,
-	sigp_external_call = 2,
-	sigp_emergency_signal = 3,
-	sigp_start = 4,
-	sigp_stop = 5,
-	sigp_restart = 6,
-	sigp_stop_and_store_status = 9,
-	sigp_initial_cpu_reset = 11,
-	sigp_cpu_reset = 12,
-	sigp_set_prefix = 13,
-	sigp_store_status_at_address = 14,
-	sigp_store_extended_status_at_address = 15,
-	sigp_set_architecture = 18,
-	sigp_conditional_emergency_signal = 19,
-	sigp_sense_running = 21,
-};
-
-enum {
-	sigp_order_code_accepted = 0,
-	sigp_status_stored = 1,
-	sigp_busy = 2,
-	sigp_not_operational = 3,
-};
-
-/*
- * Definitions for external call.
- */
-enum {
-	ec_schedule = 0,
-	ec_call_function,
-	ec_call_function_single,
-	ec_stop_cpu,
-};
-
-/*
- * Signal processor.
- */
-static inline int raw_sigp(u16 cpu, int order)
-{
-	register unsigned long reg1 asm ("1") = 0;
-	int ccode;
-
-	asm volatile(
-		"	sigp	%1,%2,0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode)
-		: "d" (reg1), "d" (cpu),
-		  "a" (order) : "cc" , "memory");
-	return ccode;
-}
-
-/*
- * Signal processor with parameter.
- */
-static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
-{
-	register unsigned int reg1 asm ("1") = parameter;
-	int ccode;
-
-	asm volatile(
-		"	sigp	%1,%2,0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode)
-		: "d" (reg1), "d" (cpu),
-		  "a" (order) : "cc" , "memory");
-	return ccode;
-}
-
-/*
- * Signal processor with parameter and return status.
- */
-static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
-{
-	register unsigned int reg1 asm ("1") = parm;
-	int ccode;
-
-	asm volatile(
-		"	sigp	%1,%2,0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode), "+d" (reg1)
-		: "d" (cpu), "a" (order)
-		: "cc" , "memory");
-	*status = reg1;
-	return ccode;
-}
-
-static inline int sigp(int cpu, int order)
-{
-	return raw_sigp(cpu_logical_map(cpu), order);
-}
-
-static inline int sigp_p(u32 parameter, int cpu, int order)
-{
-	return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
-}
-
-static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
-{
-	return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
-}
-
-#endif /* __ASM_SIGP_H */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c32e9123b40c..797f78729680 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 1999,2009
+ * Copyright IBM Corp. 1999,2012
  * Author(s): Denis Joseph Barrow,
  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
@@ -10,71 +10,52 @@
 #ifdef CONFIG_SMP
 
 #include <asm/system.h>
-#include <asm/sigp.h>
-
-extern void machine_restart_smp(char *);
-extern void machine_halt_smp(void);
-extern void machine_power_off_smp(void);
 
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
 
-extern int __cpu_disable (void);
-extern void __cpu_die (unsigned int cpu);
-extern int __cpu_up (unsigned int cpu);
-
 extern struct mutex smp_cpu_state_mutex;
+extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
+
+extern int __cpu_up(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-
-extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
-extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
-			      int from, int to);
-extern void smp_restart_with_online_cpu(void);
-extern void smp_restart_cpu(void);
+extern void smp_call_online_cpu(void (*func)(void *), void *);
+extern void smp_call_ipl_cpu(void (*func)(void *), void *);
 
-/*
- * returns 1 if (virtual) cpu is scheduled
- * returns 0 otherwise
- */
-static inline int smp_vcpu_scheduled(int cpu)
-{
-	u32 status;
-
-	switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
-	case sigp_status_stored:
-		/* Check for running status */
-		if (status & 0x400)
-			return 0;
-		break;
-	case sigp_not_operational:
-		return 0;
-	default:
-		break;
-	}
-	return 1;
-}
+extern int smp_find_processor_id(u16 address);
+extern int smp_store_status(int cpu);
+extern int smp_vcpu_scheduled(int cpu);
+extern void smp_yield_cpu(int cpu);
+extern void smp_yield(void);
+extern void smp_stop_cpu(void);
 
 #else /* CONFIG_SMP */
 
-static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
+static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
 	func(data);
 }
 
-static inline void smp_restart_with_online_cpu(void)
+static inline void smp_call_online_cpu(void (*func)(void *), void *data)
 {
+	func(data);
 }
 
-#define smp_vcpu_scheduled	(1)
+static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_vcpu_scheduled(int cpu) { return 1; }
+static inline void smp_yield_cpu(int cpu) { }
+static inline void smp_yield(void) { }
+static inline void smp_stop_cpu(void) { }
 
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
+extern void __cpu_die(unsigned int cpu);
+extern int __cpu_disable(void);
 #else
 static inline int smp_rescan_cpus(void) { return 0; }
 static inline void cpu_die(void) { }
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index d73cc6b60000..2e0bb7f0f9b2 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -7,8 +7,10 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
+#include <linux/preempt.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
+#include <linux/string.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/setup.h>
@@ -248,6 +250,38 @@ static inline int test_facility(unsigned long nr)
 	return (*ptr & (0x80 >> (nr & 7))) != 0;
 }
 
+/**
+ * stfle - Store facility list extended
+ * @stfle_fac_list: array where facility list can be stored
+ * @size: size of passed in array in double words
+ */
+static inline void stfle(u64 *stfle_fac_list, int size)
+{
+	unsigned long nr;
+
+	preempt_disable();
+	S390_lowcore.stfl_fac_list = 0;
+	asm volatile(
+		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
+		"0:\n"
+		EX_TABLE(0b, 0b)
+		: "=m" (S390_lowcore.stfl_fac_list));
+	nr = 4; /* bytes stored by stfl */
+	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+	if (S390_lowcore.stfl_fac_list & 0x01000000) {
+		/* More facility bits available with stfle */
+		register unsigned long reg0 asm("0") = size - 1;
+
+		asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+			     : "+d" (reg0)
+			     : "a" (stfle_fac_list)
+			     : "memory", "cc");
+		nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+	}
+	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+	preempt_enable();
+}
+
 static inline unsigned short stap(void)
 {
 	unsigned short cpu_address;
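
The stfle() inline added above first stores the basic 4-byte facility list with stfl, issues the extended stfle instruction only when facility bit 7 (mask 0x01000000, store-facility-list-extended) is present, and zero-fills whatever the machine does not store. A minimal caller sketch, mirroring setup_facility_list() in the early.c hunk further down; the local array is an illustrative assumption:

	/* Illustrative only: query the facility list into a local buffer.
	 * The size argument counts doublewords. */
	u64 fac_list[4];

	stfle(fac_list, ARRAY_SIZE(fac_list));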
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index 814243cafdfe..e63069ba39e3 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -33,8 +33,8 @@ struct vtimer_queue {
 	spinlock_t lock;
 	__u64 timer;		/* last programmed timer */
 	__u64 elapsed;		/* elapsed time of timer expire values */
-	__u64 idle;		/* temp var for idle */
-	int do_spt;		/* =1: reprogram cpu timer in idle */
+	__u64 idle_enter;	/* cpu timer on idle enter */
+	__u64 idle_exit;	/* cpu timer on idle exit */
 };
 
 extern void init_virt_timer(struct vtimer_list *timer);
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 533f35751aeb..c4a11cfad3c8 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -40,8 +40,8 @@ struct vdso_per_cpu_data {
 extern struct vdso_data *vdso_data;
 
 #ifdef CONFIG_64BIT
-int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore);
-void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore);
+int vdso_alloc_per_cpu(struct _lowcore *lowcore);
+void vdso_free_per_cpu(struct _lowcore *lowcore);
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7d9ec924e7e7..16b0b433f1f4 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 obj-y	:= bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
 	   processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
 	   debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
-	   sysinfo.o jump_label.o
+	   sysinfo.o jump_label.o lgr.o os_info.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -34,8 +34,6 @@ extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_BOOK)	+= topology.o
-obj-$(CONFIG_SMP)		+= $(if $(CONFIG_64BIT),switch_cpu64.o, \
-							switch_cpu.o)
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp_asm64.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 compat-obj-$(CONFIG_AUDIT)	+= compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 6e6a72e66d60..ed8c913db79e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -8,9 +8,11 @@
 
 #include <linux/kbuild.h>
 #include <linux/sched.h>
+#include <asm/cputime.h>
+#include <asm/timer.h>
 #include <asm/vdso.h>
-#include <asm/sigp.h>
 #include <asm/pgtable.h>
+#include <asm/system.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -70,15 +72,15 @@ int main(void)
 	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
 	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 	BLANK();
-	/* constants for SIGP */
-	DEFINE(__SIGP_STOP, sigp_stop);
-	DEFINE(__SIGP_RESTART, sigp_restart);
-	DEFINE(__SIGP_SENSE, sigp_sense);
-	DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
-	BLANK();
+	/* idle data offsets */
+	DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
+	DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
+	/* vtimer queue offsets */
+	DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
+	DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
 	/* lowcore offsets */
 	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
-	DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
+	DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
 	DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
 	DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
 	DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
@@ -95,20 +97,19 @@ int main(void)
 	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
 	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
 	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
-	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
-	BLANK();
-	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
 	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
 	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
 	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
 	DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
 	DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
 	DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
+	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
 	DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
 	DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
 	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
 	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
 	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
+	BLANK();
 	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
 	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
 	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
@@ -129,12 +130,16 @@ int main(void)
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+	DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
+	DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
 	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
 	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
+	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
+	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
 	DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
 	DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6fe78c2f95d9..53a82c8d50e9 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -581,7 +581,6 @@ give_sigsegv:
 int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
-	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -591,10 +590,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_frame32(sig, ka, oldset, regs);
 	if (ret)
 		return ret;
-	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&blocked, sig);
-	set_current_blocked(&blocked);
+	block_sigmask(ka, sig);
 	return 0;
 }
 
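
block_sigmask() is the generic helper that performs the exact sequence deleted above. Roughly equivalent to this sketch (the function name below is illustrative; the body is the removed open-coded logic):

	/* Sketch of the logic that block_sigmask(ka, sig) replaces here. */
	static void block_sigmask_equivalent(struct k_sigaction *ka, int sig)
	{
		sigset_t blocked;

		sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&blocked, sig);
		set_current_blocked(&blocked);
	}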
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c383ce440d99..cc1172b26873 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -14,6 +14,7 @@
 #include <linux/bootmem.h>
 #include <linux/elf.h>
 #include <asm/ipl.h>
+#include <asm/os_info.h>
 
 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -51,7 +52,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 /*
  * Copy memory from old kernel
  */
-static int copy_from_oldmem(void *dest, void *src, size_t count)
+int copy_from_oldmem(void *dest, void *src, size_t count)
 {
 	unsigned long copied = 0;
 	int rc;
@@ -224,28 +225,44 @@ static void *nt_prpsinfo(void *ptr)
 }
 
 /*
- * Initialize vmcoreinfo note (new kernel)
+ * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
  */
-static void *nt_vmcoreinfo(void *ptr)
+static void *get_vmcoreinfo_old(unsigned long *size)
 {
 	char nt_name[11], *vmcoreinfo;
 	Elf64_Nhdr note;
 	void *addr;
 
 	if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
-		return ptr;
+		return NULL;
 	memset(nt_name, 0, sizeof(nt_name));
 	if (copy_from_oldmem(&note, addr, sizeof(note)))
-		return ptr;
+		return NULL;
 	if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
-		return ptr;
+		return NULL;
 	if (strcmp(nt_name, "VMCOREINFO") != 0)
-		return ptr;
-	vmcoreinfo = kzalloc_panic(note.n_descsz + 1);
+		return NULL;
+	vmcoreinfo = kzalloc_panic(note.n_descsz);
 	if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
+		return NULL;
+	*size = note.n_descsz;
+	return vmcoreinfo;
+}
+
+/*
+ * Initialize vmcoreinfo note (new kernel)
+ */
+static void *nt_vmcoreinfo(void *ptr)
+{
+	unsigned long size;
+	void *vmcoreinfo;
+
+	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
+	if (!vmcoreinfo)
+		vmcoreinfo = get_vmcoreinfo_old(&size);
+	if (!vmcoreinfo)
 		return ptr;
-	vmcoreinfo[note.n_descsz + 1] = 0;
-	return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO");
+	return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
 }
 
 /*
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 6848828b962e..19e5e9eba546 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -2,8 +2,8 @@
  * arch/s390/kernel/debug.c
  *   S/390 debug facility
  *
- *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
- *                             IBM Corporation
+ *    Copyright IBM Corp. 1999, 2012
+ *
  *    Author(s): Michael Holzheu (holzheu@de.ibm.com),
  *               Holger Smolinski (Holger.Smolinski@de.ibm.com)
  *
@@ -167,6 +167,7 @@ static debug_info_t *debug_area_last = NULL;
 static DEFINE_MUTEX(debug_mutex);
 
 static int initialized;
+static int debug_critical;
 
 static const struct file_operations debug_file_ops = {
 	.owner	 = THIS_MODULE,
@@ -932,6 +933,11 @@ debug_stop_all(void)
 }
 
 
+void debug_set_critical(void)
+{
+	debug_critical = 1;
+}
+
 /*
  * debug_event_common:
  * - write debug entry with given size
@@ -945,7 +951,11 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len)
 
 	if (!debug_active || !id->areas)
 		return NULL;
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	memset(DEBUG_DATA(active), 0, id->buf_size);
 	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
@@ -968,7 +978,11 @@ debug_entry_t
 
 	if (!debug_active || !id->areas)
 		return NULL;
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	memset(DEBUG_DATA(active), 0, id->buf_size);
 	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
@@ -1013,7 +1027,11 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 		return NULL;
 	numargs=debug_count_numargs(string);
 
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
 	va_start(ap,string);
@@ -1047,7 +1065,11 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 
 	numargs=debug_count_numargs(string);
 
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
 	va_start(ap,string);
@@ -1428,10 +1450,10 @@ debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
 	rc += sprintf(out_buf + rc, "| ");
 	for (i = 0; i < id->buf_size; i++) {
 		unsigned char c = in_buf[i];
-		if (!isprint(c))
-			rc += sprintf(out_buf + rc, ".");
-		else
+		if (isascii(c) && isprint(c))
 			rc += sprintf(out_buf + rc, "%c", c);
+		else
+			rc += sprintf(out_buf + rc, ".");
 	}
 	rc += sprintf(out_buf + rc, "\n");
 	return rc;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 52098d6dfaa7..578eb4e6d157 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -29,6 +29,7 @@
 #include <asm/sysinfo.h>
 #include <asm/cpcmd.h>
 #include <asm/sclp.h>
+#include <asm/system.h>
 #include "entry.h"
 
 /*
@@ -262,25 +263,8 @@ static noinline __init void setup_lowcore_early(void)
 
 static noinline __init void setup_facility_list(void)
 {
-	unsigned long nr;
-
-	S390_lowcore.stfl_fac_list = 0;
-	asm volatile(
-		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
-		"0:\n"
-		EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list));
-	memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
-	nr = 4;				/* # bytes stored by stfl */
-	if (test_facility(7)) {
-		/* More facility bits available with stfle */
-		register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1;
-		asm volatile(".insn s,0xb2b00000,%0" /* stfle */
-			     : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0)
-			     : : "cc");
-		nr = (reg0 + 1) * 8;	/* # bytes stored by stfle */
-	}
-	memset((char *) S390_lowcore.stfle_fac_list + nr, 0,
-	       MAX_FACILITY_BIT/8 - nr);
+	stfle(S390_lowcore.stfle_fac_list,
+	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
 }
 
 static noinline __init void setup_hpage(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3705700ed374..74ee563fe62b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -2,7 +2,7 @@
  * arch/s390/kernel/entry.S
  *  S390 low-level entry points.
  *
- *    Copyright (C) IBM Corp. 1999,2006
+ *    Copyright (C) IBM Corp. 1999,2012
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *		 Hartmut Penner (hp@de.ibm.com),
  *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -105,14 +105,14 @@ STACK_SIZE = 1 << STACK_SHIFT
 
 	.macro	ADD64 high,low,timer
 	al	\high,\timer
-	al	\low,\timer+4
+	al	\low,4+\timer
 	brc	12,.+8
 	ahi	\high,1
 	.endm
 
 	.macro	SUB64 high,low,timer
 	sl	\high,\timer
-	sl	\low,\timer+4
+	sl	\low,4+\timer
 	brc	3,.+8
 	ahi	\high,-1
 	.endm
@@ -471,7 +471,6 @@ io_tif:
 	jnz	io_work			# there is work to do (signals etc.)
 io_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
-	ni	__LC_RETURN_PSW+1,0xfd	# clean wait state bit
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
@@ -606,12 +605,32 @@ ext_skip:
 	stm	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
+	l	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
 	l	%r4,__LC_EXT_PARAMS	# get external parameters
 	l	%r1,BASED(.Ldo_extint)
 	basr	%r14,%r1		# call do_extint
 	j	io_return
 
+/*
+ * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ */
+ENTRY(psw_idle)
+	st	%r4,__SF_EMPTY(%r15)
+	basr	%r1,0
+	la	%r1,psw_idle_lpsw+4-.(%r1)
+	st	%r1,__SF_EMPTY+4(%r15)
+	oi	__SF_EMPTY+4(%r15),0x80
+	la	%r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
+	stck	__IDLE_ENTER(%r2)
+	ltr	%r5,%r5
+	stpt	__VQ_IDLE_ENTER(%r3)
+	jz	psw_idle_lpsw
+	spt	0(%r1)
+psw_idle_lpsw:
+	lpsw	__SF_EMPTY(%r15)
+	br	%r14
+psw_idle_end:
+
 __critical_end:
 
 /*
617/* 636/*
@@ -673,7 +692,6 @@ mcck_skip:
 	TRACE_IRQS_ON
 mcck_return:
 	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
-	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
 	lm	%r0,%r15,__PT_R0(%r11)
@@ -691,77 +709,30 @@ mcck_panic:
 0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	mcck_skip
 
-/*
- * Restart interruption handler, kick starter for additional CPUs
- */
-#ifdef CONFIG_SMP
-	__CPUINIT
-ENTRY(restart_int_handler)
-	basr	%r1,0
-restart_base:
-	spt	restart_vtime-restart_base(%r1)
-	stck	__LC_LAST_UPDATE_CLOCK
-	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
-	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-	l	%r15,__LC_GPREGS_SAVE_AREA+60	# load ksp
-	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA	# get new ctl regs
-	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
-	lm	%r6,%r15,__SF_GPRS(%r15)	# load registers from clone
-	l	%r1,__LC_THREAD_INFO
-	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
-	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
-	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-	ssm	__LC_PGM_NEW_PSW		# turn dat on, keep irqs off
-	basr	%r14,0
-	l	%r14,restart_addr-.(%r14)
-	basr	%r14,%r14			# call start_secondary
-restart_addr:
-	.long	start_secondary
-	.align	8
-restart_vtime:
-	.long	0x7fffffff,0xffffffff
-	.previous
-#else
-/*
- * If we do not run with SMP enabled, let the new CPU crash ...
- */
-ENTRY(restart_int_handler)
-	basr	%r1,0
-restart_base:
-	lpsw	restart_crash-restart_base(%r1)
-	.align	8
-restart_crash:
-	.long	0x000a0000,0x00000000
-restart_go:
-#endif
-
 #
 # PSW restart interrupt handler
 #
-ENTRY(psw_restart_int_handler)
+ENTRY(restart_int_handler)
 	st	%r15,__LC_SAVE_AREA_RESTART
-	basr	%r15,0
-0:	l	%r15,.Lrestart_stack-0b(%r15)	# load restart stack
-	l	%r15,0(%r15)
+	l	%r15,__LC_RESTART_STACK
 	ahi	%r15,-__PT_SIZE			# create pt_regs on stack
+	xc	0(__PT_SIZE,%r15),0(%r15)
 	stm	%r0,%r14,__PT_R0(%r15)
 	mvc	__PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
 	mvc	__PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
-	ahi	%r15,-STACK_FRAME_OVERHEAD
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	basr	%r14,0
-1:	l	%r14,.Ldo_restart-1b(%r14)
-	basr	%r14,%r14
-	basr	%r14,0				# load disabled wait PSW if
-2:	lpsw	restart_psw_crash-2b(%r14)	# do_restart returns
-	.align	4
-.Ldo_restart:
-	.long	do_restart
-.Lrestart_stack:
-	.long	restart_stack
-	.align	8
-restart_psw_crash:
-	.long	0x000a0000,0x00000000 + restart_psw_crash
+	ahi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
+	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
+	lm	%r1,%r3,__LC_RESTART_FN		# load fn, parm & source cpu
+	ltr	%r3,%r3				# test source cpu address
+	jm	1f				# negative -> skip source stop
+0:	sigp	%r4,%r3,1			# sigp sense to source cpu
+	brc	10,0b				# wait for status stored
+1:	basr	%r14,%r1			# call function
+	stap	__SF_EMPTY(%r15)		# store cpu address
+	lh	%r3,__SF_EMPTY(%r15)
+2:	sigp	%r4,%r3,5			# sigp stop to current cpu
+	brc	2,2b
+3:	j	3b
 
 	.section .kprobes.text, "ax"
 
@@ -795,6 +766,8 @@ cleanup_table:
 	.long	io_tif + 0x80000000
 	.long	io_restore + 0x80000000
 	.long	io_done + 0x80000000
+	.long	psw_idle + 0x80000000
+	.long	psw_idle_end + 0x80000000
 
 cleanup_critical:
 	cl	%r9,BASED(cleanup_table)	# system_call
@@ -813,6 +786,10 @@ cleanup_critical:
 	jl	cleanup_io_tif
 	cl	%r9,BASED(cleanup_table+28)	# io_done
 	jl	cleanup_io_restore
+	cl	%r9,BASED(cleanup_table+32)	# psw_idle
+	jl	0f
+	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
+	jl	cleanup_idle
 0:	br	%r14
 
 cleanup_system_call:
@@ -896,7 +873,6 @@ cleanup_io_restore:
 	jhe	0f
 	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
-	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
 	mvc	0(32,%r11),__PT_R8(%r9)
 	lm	%r0,%r7,__PT_R0(%r9)
 0:	lm	%r8,%r9,__LC_RETURN_PSW
@@ -904,11 +880,52 @@ cleanup_io_restore:
 cleanup_io_restore_insn:
 	.long	io_done - 4 + 0x80000000
 
+cleanup_idle:
+	# copy interrupt clock & cpu timer
+	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	chi	%r11,__LC_SAVE_AREA_ASYNC
+	je	0f
+	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+0:	# check if stck has been executed
+	cl	%r9,BASED(cleanup_idle_insn)
+	jhe	1f
+	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
+	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
+	j	2f
+1:	# check if the cpu timer has been reprogrammed
+	ltr	%r5,%r5
+	jz	2f
+	spt	__VQ_IDLE_ENTER(%r3)
+2:	# account system time going idle
+	lm	%r9,%r10,__LC_STEAL_TIMER
+	ADD64	%r9,%r10,__IDLE_ENTER(%r2)
+	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
+	stm	%r9,%r10,__LC_STEAL_TIMER
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	lm	%r9,%r10,__LC_SYSTEM_TIMER
+	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
+	SUB64	%r9,%r10,__VQ_IDLE_ENTER(%r3)
+	stm	%r9,%r10,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	# prepare return psw
+	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
+	l	%r9,24(%r11)			# return from psw_idle
+	br	%r14
+cleanup_idle_insn:
+	.long	psw_idle_lpsw + 0x80000000
+cleanup_idle_wait:
+	.long	0xfffdffff
+
 /*
  * Integer constants
  */
 	.align	4
-.Lnr_syscalls:	.long	NR_syscalls
+.Lnr_syscalls:
+	.long	NR_syscalls
+.Lvtimer_max:
+	.quad	0x7fffffffffffffff
 
 /*
  * Symbol constants
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index bf538aaf407d..6cdddac93a2e 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -4,11 +4,22 @@
 #include <linux/types.h>
 #include <linux/signal.h>
 #include <asm/ptrace.h>
-
+#include <asm/cputime.h>
+#include <asm/timer.h>
 
 extern void (*pgm_check_table[128])(struct pt_regs *);
 extern void *restart_stack;
 
+void system_call(void);
+void pgm_check_handler(void);
+void ext_int_handler(void);
+void io_int_handler(void);
+void mcck_int_handler(void);
+void restart_int_handler(void);
+void restart_call_handler(void);
+void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
+	      unsigned long, int);
+
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
 
@@ -24,9 +35,9 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
24 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); 35 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
25void do_notify_resume(struct pt_regs *regs); 36void do_notify_resume(struct pt_regs *regs);
26 37
27void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); 38struct ext_code;
39void do_extint(struct pt_regs *regs, struct ext_code, unsigned int, unsigned long);
28void do_restart(void); 40void do_restart(void);
29int __cpuinit start_secondary(void *cpuvoid);
30void __init startup_init(void); 41void __init startup_init(void);
31void die(struct pt_regs *regs, const char *str); 42void die(struct pt_regs *regs, const char *str);
32 43
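
The psw_idle() prototype above is the C-visible half of the idle rework. A hedged caller sketch follows; only the signature comes from this header, while the call site and the meaning of the last argument (whether the cpu timer needs reprogramming, tested as %r5 in the assembler) are assumptions:

    /* Sketch: hand-off from the C idle path to the assembler helper. */
    struct s390_idle_data;
    struct vtimer_queue;

    static void idle_handoff_sketch(struct s390_idle_data *idle,
                                    struct vtimer_queue *vq,
                                    unsigned long psw_mask, int vt_pending)
    {
        /* Loads an enabled-wait PSW and returns once an interrupt hits;
         * cleanup_idle repairs the state if the wait was interrupted
         * inside the critical section. */
        psw_idle(idle, vq, psw_mask, vt_pending);
    }
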
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 412a7b8783d7..4e1c292fa7e3 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/entry64.S 2 * arch/s390/kernel/entry64.S
3 * S390 low-level entry points. 3 * S390 low-level entry points.
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2010 5 * Copyright (C) IBM Corp. 1999,2012
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -489,7 +489,6 @@ io_restore:
489 lg %r14,__LC_VDSO_PER_CPU 489 lg %r14,__LC_VDSO_PER_CPU
490 lmg %r0,%r10,__PT_R0(%r11) 490 lmg %r0,%r10,__PT_R0(%r11)
491 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 491 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
492 ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
493 stpt __LC_EXIT_TIMER 492 stpt __LC_EXIT_TIMER
494 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 493 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
495 lmg %r11,%r15,__PT_R11(%r11) 494 lmg %r11,%r15,__PT_R11(%r11)
@@ -625,12 +624,30 @@ ext_skip:
625 TRACE_IRQS_OFF 624 TRACE_IRQS_OFF
626 lghi %r1,4096 625 lghi %r1,4096
627 lgr %r2,%r11 # pass pointer to pt_regs 626 lgr %r2,%r11 # pass pointer to pt_regs
628 llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code 627 llgf %r3,__LC_EXT_CPU_ADDR # get cpu address + interruption code
629 llgf %r4,__LC_EXT_PARAMS # get external parameter 628 llgf %r4,__LC_EXT_PARAMS # get external parameter
630 lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter 629 lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter
631 brasl %r14,do_extint 630 brasl %r14,do_extint
632 j io_return 631 j io_return
633 632
633/*
634 * Load idle PSW. The second "half" of this function is in cleanup_idle.
635 */
636ENTRY(psw_idle)
637 stg %r4,__SF_EMPTY(%r15)
638 larl %r1,psw_idle_lpsw+4
639 stg %r1,__SF_EMPTY+8(%r15)
640 larl %r1,.Lvtimer_max
641 stck __IDLE_ENTER(%r2)
642 ltr %r5,%r5
643 stpt __VQ_IDLE_ENTER(%r3)
644 jz psw_idle_lpsw
645 spt 0(%r1)
646psw_idle_lpsw:
647 lpswe __SF_EMPTY(%r15)
648 br %r14
649psw_idle_end:
650
634__critical_end: 651__critical_end:
635 652
636/* 653/*
@@ -696,7 +713,6 @@ mcck_return:
696 lg %r14,__LC_VDSO_PER_CPU 713 lg %r14,__LC_VDSO_PER_CPU
697 lmg %r0,%r10,__PT_R0(%r11) 714 lmg %r0,%r10,__PT_R0(%r11)
698 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 715 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
699 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
700 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 716 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
701 jno 0f 717 jno 0f
702 stpt __LC_EXIT_TIMER 718 stpt __LC_EXIT_TIMER
@@ -713,68 +729,30 @@ mcck_panic:
7130: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 7290: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
714 j mcck_skip 730 j mcck_skip
715 731
716/*
717 * Restart interruption handler, kick starter for additional CPUs
718 */
719#ifdef CONFIG_SMP
720 __CPUINIT
721ENTRY(restart_int_handler)
722 basr %r1,0
723restart_base:
724 spt restart_vtime-restart_base(%r1)
725 stck __LC_LAST_UPDATE_CLOCK
726 mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
727 mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
728 lghi %r10,__LC_GPREGS_SAVE_AREA
729 lg %r15,120(%r10) # load ksp
730 lghi %r10,__LC_CREGS_SAVE_AREA
731 lctlg %c0,%c15,0(%r10) # get new ctl regs
732 lghi %r10,__LC_AREGS_SAVE_AREA
733 lam %a0,%a15,0(%r10)
734 lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone
735 lg %r1,__LC_THREAD_INFO
736 mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
737 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
738 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
739 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
740 brasl %r14,start_secondary
741 .align 8
742restart_vtime:
743 .long 0x7fffffff,0xffffffff
744 .previous
745#else
746/*
747 * If we do not run with SMP enabled, let the new CPU crash ...
748 */
749ENTRY(restart_int_handler)
750 basr %r1,0
751restart_base:
752 lpswe restart_crash-restart_base(%r1)
753 .align 8
754restart_crash:
755 .long 0x000a0000,0x00000000,0x00000000,0x00000000
756restart_go:
757#endif
758
759# 732#
760# PSW restart interrupt handler 733# PSW restart interrupt handler
761# 734#
762ENTRY(psw_restart_int_handler) 735ENTRY(restart_int_handler)
763 stg %r15,__LC_SAVE_AREA_RESTART 736 stg %r15,__LC_SAVE_AREA_RESTART
764 larl %r15,restart_stack # load restart stack 737 lg %r15,__LC_RESTART_STACK
765 lg %r15,0(%r15)
766 aghi %r15,-__PT_SIZE # create pt_regs on stack 738 aghi %r15,-__PT_SIZE # create pt_regs on stack
739 xc 0(__PT_SIZE,%r15),0(%r15)
767 stmg %r0,%r14,__PT_R0(%r15) 740 stmg %r0,%r14,__PT_R0(%r15)
768 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 741 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
769 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw 742 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
770 aghi %r15,-STACK_FRAME_OVERHEAD 743 aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
771 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 744 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
772 brasl %r14,do_restart 745 lmg %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
773 larl %r14,restart_psw_crash # load disabled wait PSW if 746 ltgr %r3,%r3 # test source cpu address
774 lpswe 0(%r14) # do_restart returns 747 jm 1f # negative -> skip source stop
775 .align 8 7480: sigp %r4,%r3,1 # sigp sense to source cpu
776restart_psw_crash: 749 brc 10,0b # wait for status stored
777 .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash 7501: basr %r14,%r1 # call function
751 stap __SF_EMPTY(%r15) # store cpu address
752 llgh %r3,__SF_EMPTY(%r15)
7532: sigp %r4,%r3,5 # sigp stop to current cpu
754 brc 2,2b
7553: j 3b
778 756
779 .section .kprobes.text, "ax" 757 .section .kprobes.text, "ax"
780 758
@@ -808,6 +786,8 @@ cleanup_table:
808 .quad io_tif 786 .quad io_tif
809 .quad io_restore 787 .quad io_restore
810 .quad io_done 788 .quad io_done
789 .quad psw_idle
790 .quad psw_idle_end
811 791
812cleanup_critical: 792cleanup_critical:
813 clg %r9,BASED(cleanup_table) # system_call 793 clg %r9,BASED(cleanup_table) # system_call
@@ -826,6 +806,10 @@ cleanup_critical:
826 jl cleanup_io_tif 806 jl cleanup_io_tif
827 clg %r9,BASED(cleanup_table+56) # io_done 807 clg %r9,BASED(cleanup_table+56) # io_done
828 jl cleanup_io_restore 808 jl cleanup_io_restore
809 clg %r9,BASED(cleanup_table+64) # psw_idle
810 jl 0f
811 clg %r9,BASED(cleanup_table+72) # psw_idle_end
812 jl cleanup_idle
8290: br %r14 8130: br %r14
830 814
831 815
@@ -915,7 +899,6 @@ cleanup_io_restore:
915 je 0f 899 je 0f
916 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 900 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
917 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 901 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
918 ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
919 mvc 0(64,%r11),__PT_R8(%r9) 902 mvc 0(64,%r11),__PT_R8(%r9)
920 lmg %r0,%r7,__PT_R0(%r9) 903 lmg %r0,%r7,__PT_R0(%r9)
9210: lmg %r8,%r9,__LC_RETURN_PSW 9040: lmg %r8,%r9,__LC_RETURN_PSW
@@ -923,6 +906,42 @@ cleanup_io_restore:
923cleanup_io_restore_insn: 906cleanup_io_restore_insn:
924 .quad io_done - 4 907 .quad io_done - 4
925 908
909cleanup_idle:
910 # copy interrupt clock & cpu timer
911 mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
912 mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
913 cghi %r11,__LC_SAVE_AREA_ASYNC
914 je 0f
915 mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
916 mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
9170: # check if stck & stpt have been executed
918 clg %r9,BASED(cleanup_idle_insn)
919 jhe 1f
920 mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
921 mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
922 j 2f
9231: # check if the cpu timer has been reprogrammed
924 ltr %r5,%r5
925 jz 2f
926 spt __VQ_IDLE_ENTER(%r3)
9272: # account system time going idle
928 lg %r9,__LC_STEAL_TIMER
929 alg %r9,__IDLE_ENTER(%r2)
930 slg %r9,__LC_LAST_UPDATE_CLOCK
931 stg %r9,__LC_STEAL_TIMER
932 mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
933 lg %r9,__LC_SYSTEM_TIMER
934 alg %r9,__LC_LAST_UPDATE_TIMER
935 slg %r9,__VQ_IDLE_ENTER(%r3)
936 stg %r9,__LC_SYSTEM_TIMER
937 mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
938 # prepare return psw
939 nihh %r8,0xfffd # clear wait state bit
940 lg %r9,48(%r11) # return from psw_idle
941 br %r14
942cleanup_idle_insn:
943 .quad psw_idle_lpsw
944
926/* 945/*
927 * Integer constants 946 * Integer constants
928 */ 947 */
@@ -931,6 +950,8 @@ cleanup_io_restore_insn:
931 .quad __critical_start 950 .quad __critical_start
932.Lcritical_length: 951.Lcritical_length:
933 .quad __critical_end - __critical_start 952 .quad __critical_end - __critical_start
953.Lvtimer_max:
954 .quad 0x7fffffffffffffff
934 955
935 956
936#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 957#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
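
The rewritten restart_int_handler is a small cross-CPU call protocol driven by four lowcore words. A hedged C rendering (the extern helpers stand in for the sigp sense/stop loops in the assembler; everything else mirrors the diff):

    extern int sigp_sense_until_stopped(unsigned short addr); /* stand-in */
    extern void sigp_stop_self(void);                         /* stand-in */

    struct restart_sketch {
        unsigned long restart_stack;  /* stack the handler switches to */
        unsigned long restart_fn;     /* function to invoke */
        unsigned long restart_data;   /* its argument */
        unsigned long restart_source; /* requester's cpu address, or -1UL */
    };

    static void restart_handler_sketch(struct restart_sketch *lc)
    {
        void (*fn)(unsigned long) = (void (*)(unsigned long)) lc->restart_fn;

        if ((long) lc->restart_source >= 0)
            sigp_sense_until_stopped(lc->restart_source);
        fn(lc->restart_data);  /* normally does not return */
        sigp_stop_self();      /* labels 2:/3: above: stop, then spin */
    }
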
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index affa8e68124a..8342e65a140d 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/ipl.c 2 * arch/s390/kernel/ipl.c
3 * ipl/reipl/dump support for Linux on s390. 3 * ipl/reipl/dump support for Linux on s390.
4 * 4 *
5 * Copyright IBM Corp. 2005,2007 5 * Copyright IBM Corp. 2005,2012
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 * Volker Sameske <sameske@de.ibm.com> 8 * Volker Sameske <sameske@de.ibm.com>
@@ -17,6 +17,7 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/gfp.h> 18#include <linux/gfp.h>
19#include <linux/crash_dump.h> 19#include <linux/crash_dump.h>
20#include <linux/debug_locks.h>
20#include <asm/ipl.h> 21#include <asm/ipl.h>
21#include <asm/smp.h> 22#include <asm/smp.h>
22#include <asm/setup.h> 23#include <asm/setup.h>
@@ -25,8 +26,9 @@
25#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
26#include <asm/reset.h> 27#include <asm/reset.h>
27#include <asm/sclp.h> 28#include <asm/sclp.h>
28#include <asm/sigp.h>
29#include <asm/checksum.h> 29#include <asm/checksum.h>
30#include <asm/debug.h>
31#include <asm/os_info.h>
30#include "entry.h" 32#include "entry.h"
31 33
32#define IPL_PARM_BLOCK_VERSION 0 34#define IPL_PARM_BLOCK_VERSION 0
@@ -571,7 +573,7 @@ static void __ipl_run(void *unused)
571 573
572static void ipl_run(struct shutdown_trigger *trigger) 574static void ipl_run(struct shutdown_trigger *trigger)
573{ 575{
574 smp_switch_to_ipl_cpu(__ipl_run, NULL); 576 smp_call_ipl_cpu(__ipl_run, NULL);
575} 577}
576 578
577static int __init ipl_init(void) 579static int __init ipl_init(void)
@@ -950,6 +952,13 @@ static struct attribute_group reipl_nss_attr_group = {
950 .attrs = reipl_nss_attrs, 952 .attrs = reipl_nss_attrs,
951}; 953};
952 954
955static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block)
956{
957 reipl_block_actual = reipl_block;
958 os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual,
959 reipl_block->hdr.len);
960}
961
953/* reipl type */ 962/* reipl type */
954 963
955static int reipl_set_type(enum ipl_type type) 964static int reipl_set_type(enum ipl_type type)
@@ -965,7 +974,7 @@ static int reipl_set_type(enum ipl_type type)
965 reipl_method = REIPL_METHOD_CCW_VM; 974 reipl_method = REIPL_METHOD_CCW_VM;
966 else 975 else
967 reipl_method = REIPL_METHOD_CCW_CIO; 976 reipl_method = REIPL_METHOD_CCW_CIO;
968 reipl_block_actual = reipl_block_ccw; 977 set_reipl_block_actual(reipl_block_ccw);
969 break; 978 break;
970 case IPL_TYPE_FCP: 979 case IPL_TYPE_FCP:
971 if (diag308_set_works) 980 if (diag308_set_works)
@@ -974,7 +983,7 @@ static int reipl_set_type(enum ipl_type type)
974 reipl_method = REIPL_METHOD_FCP_RO_VM; 983 reipl_method = REIPL_METHOD_FCP_RO_VM;
975 else 984 else
976 reipl_method = REIPL_METHOD_FCP_RO_DIAG; 985 reipl_method = REIPL_METHOD_FCP_RO_DIAG;
977 reipl_block_actual = reipl_block_fcp; 986 set_reipl_block_actual(reipl_block_fcp);
978 break; 987 break;
979 case IPL_TYPE_FCP_DUMP: 988 case IPL_TYPE_FCP_DUMP:
980 reipl_method = REIPL_METHOD_FCP_DUMP; 989 reipl_method = REIPL_METHOD_FCP_DUMP;
@@ -984,7 +993,7 @@ static int reipl_set_type(enum ipl_type type)
984 reipl_method = REIPL_METHOD_NSS_DIAG; 993 reipl_method = REIPL_METHOD_NSS_DIAG;
985 else 994 else
986 reipl_method = REIPL_METHOD_NSS; 995 reipl_method = REIPL_METHOD_NSS;
987 reipl_block_actual = reipl_block_nss; 996 set_reipl_block_actual(reipl_block_nss);
988 break; 997 break;
989 case IPL_TYPE_UNKNOWN: 998 case IPL_TYPE_UNKNOWN:
990 reipl_method = REIPL_METHOD_DEFAULT; 999 reipl_method = REIPL_METHOD_DEFAULT;
@@ -1101,7 +1110,7 @@ static void __reipl_run(void *unused)
1101 1110
1102static void reipl_run(struct shutdown_trigger *trigger) 1111static void reipl_run(struct shutdown_trigger *trigger)
1103{ 1112{
1104 smp_switch_to_ipl_cpu(__reipl_run, NULL); 1113 smp_call_ipl_cpu(__reipl_run, NULL);
1105} 1114}
1106 1115
1107static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) 1116static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
@@ -1256,6 +1265,29 @@ static int __init reipl_fcp_init(void)
1256 return 0; 1265 return 0;
1257} 1266}
1258 1267
1268static int __init reipl_type_init(void)
1269{
1270 enum ipl_type reipl_type = ipl_info.type;
1271 struct ipl_parameter_block *reipl_block;
1272 unsigned long size;
1273
1274 reipl_block = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size);
1275 if (!reipl_block)
1276 goto out;
1277 /*
1278 * If we have an OS info reipl block, this will be used
1279 */
1280 if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) {
1281 memcpy(reipl_block_fcp, reipl_block, size);
1282 reipl_type = IPL_TYPE_FCP;
1283 } else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) {
1284 memcpy(reipl_block_ccw, reipl_block, size);
1285 reipl_type = IPL_TYPE_CCW;
1286 }
1287out:
1288 return reipl_set_type(reipl_type);
1289}
1290
1259static int __init reipl_init(void) 1291static int __init reipl_init(void)
1260{ 1292{
1261 int rc; 1293 int rc;
@@ -1277,10 +1309,7 @@ static int __init reipl_init(void)
1277 rc = reipl_nss_init(); 1309 rc = reipl_nss_init();
1278 if (rc) 1310 if (rc)
1279 return rc; 1311 return rc;
1280 rc = reipl_set_type(ipl_info.type); 1312 return reipl_type_init();
1281 if (rc)
1282 return rc;
1283 return 0;
1284} 1313}
1285 1314
1286static struct shutdown_action __refdata reipl_action = { 1315static struct shutdown_action __refdata reipl_action = {
@@ -1421,7 +1450,7 @@ static void dump_run(struct shutdown_trigger *trigger)
1421 if (dump_method == DUMP_METHOD_NONE) 1450 if (dump_method == DUMP_METHOD_NONE)
1422 return; 1451 return;
1423 smp_send_stop(); 1452 smp_send_stop();
1424 smp_switch_to_ipl_cpu(__dump_run, NULL); 1453 smp_call_ipl_cpu(__dump_run, NULL);
1425} 1454}
1426 1455
1427static int __init dump_ccw_init(void) 1456static int __init dump_ccw_init(void)
@@ -1499,30 +1528,12 @@ static struct shutdown_action __refdata dump_action = {
1499 1528
1500static void dump_reipl_run(struct shutdown_trigger *trigger) 1529static void dump_reipl_run(struct shutdown_trigger *trigger)
1501{ 1530{
1502 preempt_disable(); 1531 u32 csum;
1503 /* 1532
1504 * Bypass dynamic address translation (DAT) when storing IPL parameter 1533 csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
1505 * information block address and checksum into the prefix area 1534 copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum));
1506 * (corresponding to absolute addresses 0-8191). 1535 copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual,
1507 * When enhanced DAT applies and the STE format control in one, 1536 sizeof(reipl_block_actual));
1508 * the absolute address is formed without prefixing. In this case a
1509 * normal store (stg/st) into the prefix area would no more match to
1510 * absolute addresses 0-8191.
1511 */
1512#ifdef CONFIG_64BIT
1513 asm volatile("sturg %0,%1"
1514 :: "a" ((unsigned long) reipl_block_actual),
1515 "a" (&lowcore_ptr[smp_processor_id()]->ipib));
1516#else
1517 asm volatile("stura %0,%1"
1518 :: "a" ((unsigned long) reipl_block_actual),
1519 "a" (&lowcore_ptr[smp_processor_id()]->ipib));
1520#endif
1521 asm volatile("stura %0,%1"
1522 :: "a" (csum_partial(reipl_block_actual,
1523 reipl_block_actual->hdr.len, 0)),
1524 "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum));
1525 preempt_enable();
1526 dump_run(trigger); 1537 dump_run(trigger);
1527} 1538}
1528 1539
@@ -1623,9 +1634,7 @@ static void stop_run(struct shutdown_trigger *trigger)
1623 if (strcmp(trigger->name, ON_PANIC_STR) == 0 || 1634 if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
1624 strcmp(trigger->name, ON_RESTART_STR) == 0) 1635 strcmp(trigger->name, ON_RESTART_STR) == 0)
1625 disabled_wait((unsigned long) __builtin_return_address(0)); 1636 disabled_wait((unsigned long) __builtin_return_address(0));
1626 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 1637 smp_stop_cpu();
1627 cpu_relax();
1628 for (;;);
1629} 1638}
1630 1639
1631static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, 1640static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
@@ -1713,6 +1722,7 @@ static struct kobj_attribute on_panic_attr =
1713 1722
1714static void do_panic(void) 1723static void do_panic(void)
1715{ 1724{
1725 lgr_info_log();
1716 on_panic_trigger.action->fn(&on_panic_trigger); 1726 on_panic_trigger.action->fn(&on_panic_trigger);
1717 stop_run(&on_panic_trigger); 1727 stop_run(&on_panic_trigger);
1718} 1728}
@@ -1738,9 +1748,8 @@ static ssize_t on_restart_store(struct kobject *kobj,
1738static struct kobj_attribute on_restart_attr = 1748static struct kobj_attribute on_restart_attr =
1739 __ATTR(on_restart, 0644, on_restart_show, on_restart_store); 1749 __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
1740 1750
1741void do_restart(void) 1751static void __do_restart(void *ignore)
1742{ 1752{
1743 smp_restart_with_online_cpu();
1744 smp_send_stop(); 1753 smp_send_stop();
1745#ifdef CONFIG_CRASH_DUMP 1754#ifdef CONFIG_CRASH_DUMP
1746 crash_kexec(NULL); 1755 crash_kexec(NULL);
@@ -1749,6 +1758,14 @@ void do_restart(void)
1749 stop_run(&on_restart_trigger); 1758 stop_run(&on_restart_trigger);
1750} 1759}
1751 1760
1761void do_restart(void)
1762{
1763 tracing_off();
1764 debug_locks_off();
1765 lgr_info_log();
1766 smp_call_online_cpu(__do_restart, NULL);
1767}
1768
1752/* on halt */ 1769/* on halt */
1753 1770
1754static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; 1771static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
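
set_reipl_block_actual() and reipl_type_init() together let a re-IPL block survive a kdump boot. A hedged sketch of the round trip, using only symbols visible in this diff (the glue function itself is illustrative):

    static void reipl_roundtrip_sketch(void)
    {
        struct ipl_parameter_block *old;
        unsigned long size;

        /* Crashing kernel: publish and checksum the block via OS info. */
        set_reipl_block_actual(reipl_block_ccw);

        /* kdump kernel: adopt the previous kernel's block if present. */
        old = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size);
        if (old && old->hdr.pbt == DIAG308_IPL_TYPE_CCW)
            memcpy(reipl_block_ccw, old, size);
    }
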
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e30b2dfa8ba0..2429ecd68872 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -202,31 +202,27 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
202} 202}
203EXPORT_SYMBOL(unregister_external_interrupt); 203EXPORT_SYMBOL(unregister_external_interrupt);
204 204
205void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, 205void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
206 unsigned int param32, unsigned long param64) 206 unsigned int param32, unsigned long param64)
207{ 207{
208 struct pt_regs *old_regs; 208 struct pt_regs *old_regs;
209 unsigned short code;
210 struct ext_int_info *p; 209 struct ext_int_info *p;
211 int index; 210 int index;
212 211
213 code = (unsigned short) ext_int_code;
214 old_regs = set_irq_regs(regs); 212 old_regs = set_irq_regs(regs);
215 s390_idle_check(regs, S390_lowcore.int_clock,
216 S390_lowcore.async_enter_timer);
217 irq_enter(); 213 irq_enter();
218 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 214 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
219 /* Serve timer interrupts first. */ 215 /* Serve timer interrupts first. */
220 clock_comparator_work(); 216 clock_comparator_work();
221 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 217 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
222 if (code != 0x1004) 218 if (ext_code.code != 0x1004)
223 __get_cpu_var(s390_idle).nohz_delay = 1; 219 __get_cpu_var(s390_idle).nohz_delay = 1;
224 220
225 index = ext_hash(code); 221 index = ext_hash(ext_code.code);
226 rcu_read_lock(); 222 rcu_read_lock();
227 list_for_each_entry_rcu(p, &ext_int_hash[index], entry) 223 list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
228 if (likely(p->code == code)) 224 if (likely(p->code == ext_code.code))
229 p->handler(ext_int_code, param32, param64); 225 p->handler(ext_code, param32, param64);
230 rcu_read_unlock(); 226 rcu_read_unlock();
231 irq_exit(); 227 irq_exit();
232 set_irq_regs(old_regs); 228 set_irq_regs(old_regs);
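
struct ext_code only appears as a forward declaration in this excerpt. A layout consistent with the old code's (unsigned short) extraction would be the two halfwords of the 32-bit external interruption word; the authoritative definition is added to asm/irq.h by this series:

    struct ext_code {
        unsigned short subcode; /* high halfword (cpu address/subcode) */
        unsigned short code;    /* low halfword: external interrupt code */
    };
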
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
new file mode 100644
index 000000000000..8431b92ca3ae
--- /dev/null
+++ b/arch/s390/kernel/lgr.c
@@ -0,0 +1,200 @@
1/*
2 * Linux Guest Relocation (LGR) detection
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#include <linux/module.h>
9#include <linux/timer.h>
10#include <linux/slab.h>
11#include <asm/sysinfo.h>
12#include <asm/ebcdic.h>
13#include <asm/system.h>
14#include <asm/debug.h>
15#include <asm/ipl.h>
16
17#define LGR_TIMER_INTERVAL_SECS (30 * 60)
18#define VM_LEVEL_MAX 2 /* Maximum is 8, but we only record two levels */
19
20/*
21 * LGR info: Contains stfle and stsi data
22 */
23struct lgr_info {
24 /* Bit field with facility information: 4 DWORDs are stored */
25 u64 stfle_fac_list[4];
26 /* Level of system (1 = CEC, 2 = LPAR, 3 = z/VM) */
27 u32 level;
28 /* Level 1: CEC info (stsi 1.1.1) */
29 char manufacturer[16];
30 char type[4];
31 char sequence[16];
32 char plant[4];
33 char model[16];
34 /* Level 2: LPAR info (stsi 2.2.2) */
35 u16 lpar_number;
36 char name[8];
37 /* Level 3: VM info (stsi 3.2.2) */
38 u8 vm_count;
39 struct {
40 char name[8];
41 char cpi[16];
42 } vm[VM_LEVEL_MAX];
43} __packed __aligned(8);
44
45/*
46 * LGR globals
47 */
48static void *lgr_page;
49static struct lgr_info lgr_info_last;
50static struct lgr_info lgr_info_cur;
51static struct debug_info *lgr_dbf;
52
53/*
54 * Return number of valid stsi levels
55 */
56static inline int stsi_0(void)
57{
58 int rc = stsi(NULL, 0, 0, 0);
59
60 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
61}
62
63/*
64 * Copy buffer and then convert it to ASCII
65 */
66static void cpascii(char *dst, char *src, int size)
67{
68 memcpy(dst, src, size);
69 EBCASC(dst, size);
70}
71
72/*
73 * Fill LGR info with 1.1.1 stsi data
74 */
75static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
76{
77 struct sysinfo_1_1_1 *si = lgr_page;
78
79 if (stsi(si, 1, 1, 1) == -ENOSYS)
80 return;
81 cpascii(lgr_info->manufacturer, si->manufacturer,
82 sizeof(si->manufacturer));
83 cpascii(lgr_info->type, si->type, sizeof(si->type));
84 cpascii(lgr_info->model, si->model, sizeof(si->model));
85 cpascii(lgr_info->sequence, si->sequence, sizeof(si->sequence));
86 cpascii(lgr_info->plant, si->plant, sizeof(si->plant));
87}
88
89/*
90 * Fill LGR info with 2.2.2 stsi data
91 */
92static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
93{
94 struct sysinfo_2_2_2 *si = lgr_page;
95
96 if (stsi(si, 2, 2, 2) == -ENOSYS)
97 return;
98 cpascii(lgr_info->name, si->name, sizeof(si->name));
99 memcpy(&lgr_info->lpar_number, &si->lpar_number,
100 sizeof(lgr_info->lpar_number));
101}
102
103/*
104 * Fill LGR info with 3.2.2 stsi data
105 */
106static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
107{
108 struct sysinfo_3_2_2 *si = lgr_page;
109 int i;
110
111 if (stsi(si, 3, 2, 2) == -ENOSYS)
112 return;
113 for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
114 cpascii(lgr_info->vm[i].name, si->vm[i].name,
115 sizeof(si->vm[i].name));
116 cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi,
117 sizeof(si->vm[i].cpi));
118 }
119 lgr_info->vm_count = si->count;
120}
121
122/*
123 * Fill LGR info with current data
124 */
125static void lgr_info_get(struct lgr_info *lgr_info)
126{
127 memset(lgr_info, 0, sizeof(*lgr_info));
128 stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
129 lgr_info->level = stsi_0();
130 if (lgr_info->level == -ENOSYS)
131 return;
132 if (lgr_info->level >= 1)
133 lgr_stsi_1_1_1(lgr_info);
134 if (lgr_info->level >= 2)
135 lgr_stsi_2_2_2(lgr_info);
136 if (lgr_info->level >= 3)
137 lgr_stsi_3_2_2(lgr_info);
138}
139
140/*
141 * Check if LGR info has changed and, if so, log the new LGR info to s390dbf
142 */
143void lgr_info_log(void)
144{
145 static DEFINE_SPINLOCK(lgr_info_lock);
146 unsigned long flags;
147
148 if (!spin_trylock_irqsave(&lgr_info_lock, flags))
149 return;
150 lgr_info_get(&lgr_info_cur);
151 if (memcmp(&lgr_info_last, &lgr_info_cur, sizeof(lgr_info_cur)) != 0) {
152 debug_event(lgr_dbf, 1, &lgr_info_cur, sizeof(lgr_info_cur));
153 lgr_info_last = lgr_info_cur;
154 }
155 spin_unlock_irqrestore(&lgr_info_lock, flags);
156}
157EXPORT_SYMBOL_GPL(lgr_info_log);
158
159static void lgr_timer_set(void);
160
161/*
162 * LGR timer callback
163 */
164static void lgr_timer_fn(unsigned long ignored)
165{
166 lgr_info_log();
167 lgr_timer_set();
168}
169
170static struct timer_list lgr_timer =
171 TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0);
172
173/*
174 * Setup next LGR timer
175 */
176static void lgr_timer_set(void)
177{
178 mod_timer(&lgr_timer, jiffies + LGR_TIMER_INTERVAL_SECS * HZ);
179}
180
181/*
182 * Initialize LGR: Add s390dbf, write initial lgr_info and setup timer
183 */
184static int __init lgr_init(void)
185{
186 lgr_page = (void *) __get_free_pages(GFP_KERNEL, 0);
187 if (!lgr_page)
188 return -ENOMEM;
189 lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info));
190 if (!lgr_dbf) {
191 free_page((unsigned long) lgr_page);
192 return -ENOMEM;
193 }
194 debug_register_view(lgr_dbf, &debug_hex_ascii_view);
195 lgr_info_get(&lgr_info_last);
196 debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last));
197 lgr_timer_set();
198 return 0;
199}
200module_init(lgr_init);
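
Once registered, the recorded lgr_info events can be read back through the s390 debug feature, typically as /sys/kernel/debug/s390dbf/lgr/hex_ascii (assuming debugfs is mounted in the usual place).
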
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 47b168fb29c4..0f8cdf1268d0 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <linux/ftrace.h> 16#include <linux/ftrace.h>
17#include <linux/debug_locks.h>
17#include <asm/cio.h> 18#include <asm/cio.h>
18#include <asm/setup.h> 19#include <asm/setup.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
@@ -49,50 +50,21 @@ static void add_elf_notes(int cpu)
49} 50}
50 51
51/* 52/*
52 * Store status of next available physical CPU
53 */
54static int store_status_next(int start_cpu, int this_cpu)
55{
56 struct save_area *sa = (void *) 4608 + store_prefix();
57 int cpu, rc;
58
59 for (cpu = start_cpu; cpu < 65536; cpu++) {
60 if (cpu == this_cpu)
61 continue;
62 do {
63 rc = raw_sigp(cpu, sigp_stop_and_store_status);
64 } while (rc == sigp_busy);
65 if (rc != sigp_order_code_accepted)
66 continue;
67 if (sa->pref_reg)
68 return cpu;
69 }
70 return -1;
71}
72
73/*
74 * Initialize CPU ELF notes 53 * Initialize CPU ELF notes
75 */ 54 */
76void setup_regs(void) 55void setup_regs(void)
77{ 56{
78 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; 57 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
79 int cpu, this_cpu, phys_cpu = 0, first = 1; 58 int cpu, this_cpu;
80 59
81 this_cpu = stap(); 60 this_cpu = smp_find_processor_id(stap());
82 61 add_elf_notes(this_cpu);
83 if (!S390_lowcore.prefixreg_save_area)
84 first = 0;
85 for_each_online_cpu(cpu) { 62 for_each_online_cpu(cpu) {
86 if (first) { 63 if (cpu == this_cpu)
87 add_elf_notes(cpu); 64 continue;
88 first = 0; 65 if (smp_store_status(cpu))
89 continue; 66 continue;
90 }
91 phys_cpu = store_status_next(phys_cpu, this_cpu);
92 if (phys_cpu == -1)
93 break;
94 add_elf_notes(cpu); 67 add_elf_notes(cpu);
95 phys_cpu++;
96 } 68 }
97 /* Copy dump CPU store status info to absolute zero */ 69 /* Copy dump CPU store status info to absolute zero */
98 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); 70 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
@@ -238,10 +210,14 @@ static void __machine_kexec(void *data)
238 struct kimage *image = data; 210 struct kimage *image = data;
239 211
240 pfault_fini(); 212 pfault_fini();
241 if (image->type == KEXEC_TYPE_CRASH) 213 tracing_off();
214 debug_locks_off();
215 if (image->type == KEXEC_TYPE_CRASH) {
216 lgr_info_log();
242 s390_reset_system(__do_machine_kdump, data); 217 s390_reset_system(__do_machine_kdump, data);
243 else 218 } else {
244 s390_reset_system(__do_machine_kexec, data); 219 s390_reset_system(__do_machine_kexec, data);
220 }
245 disabled_wait((unsigned long) __builtin_return_address(0)); 221 disabled_wait((unsigned long) __builtin_return_address(0));
246} 222}
247 223
@@ -255,5 +231,5 @@ void machine_kexec(struct kimage *image)
255 return; 231 return;
256 tracer_disable(); 232 tracer_disable();
257 smp_send_stop(); 233 smp_send_stop();
258 smp_switch_to_ipl_cpu(__machine_kexec, image); 234 smp_call_ipl_cpu(__machine_kexec, image);
259} 235}
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 0fd2e863e114..8c372ca61350 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -254,8 +254,6 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
254 int umode; 254 int umode;
255 255
256 nmi_enter(); 256 nmi_enter();
257 s390_idle_check(regs, S390_lowcore.mcck_clock,
258 S390_lowcore.mcck_enter_timer);
259 kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++; 257 kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 258 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
261 mcck = &__get_cpu_var(cpu_mcck); 259 mcck = &__get_cpu_var(cpu_mcck);
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
new file mode 100644
index 000000000000..bbe522672e06
--- /dev/null
+++ b/arch/s390/kernel/os_info.c
@@ -0,0 +1,169 @@
1/*
2 * OS info memory interface
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#define KMSG_COMPONENT "os_info"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/crash_dump.h>
12#include <linux/kernel.h>
13#include <asm/checksum.h>
14#include <asm/lowcore.h>
15#include <asm/system.h>
16#include <asm/os_info.h>
17
18/*
19 * OS info structure has to be page aligned
20 */
21static struct os_info os_info __page_aligned_data;
22
23/*
24 * Compute checksum over OS info structure
25 */
26u32 os_info_csum(struct os_info *os_info)
27{
28 int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
29 return csum_partial(&os_info->version_major, size, 0);
30}
31
32/*
33 * Add crashkernel info to OS info and update checksum
34 */
35void os_info_crashkernel_add(unsigned long base, unsigned long size)
36{
37 os_info.crashkernel_addr = (u64)(unsigned long)base;
38 os_info.crashkernel_size = (u64)(unsigned long)size;
39 os_info.csum = os_info_csum(&os_info);
40}
41
42/*
43 * Add OS info entry and update checksum
44 */
45void os_info_entry_add(int nr, void *ptr, u64 size)
46{
47 os_info.entry[nr].addr = (u64)(unsigned long)ptr;
48 os_info.entry[nr].size = size;
49 os_info.entry[nr].csum = csum_partial(ptr, size, 0);
50 os_info.csum = os_info_csum(&os_info);
51}
52
53/*
54 * Initialize OS info structure and set lowcore pointer
55 */
56void __init os_info_init(void)
57{
58 void *ptr = &os_info;
59
60 os_info.version_major = OS_INFO_VERSION_MAJOR;
61 os_info.version_minor = OS_INFO_VERSION_MINOR;
62 os_info.magic = OS_INFO_MAGIC;
63 os_info.csum = os_info_csum(&os_info);
64 copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr));
65}
66
67#ifdef CONFIG_CRASH_DUMP
68
69static struct os_info *os_info_old;
70
71/*
72 * Allocate and copy OS info entry from oldmem
73 */
74static void os_info_old_alloc(int nr, int align)
75{
76 unsigned long addr, size = 0;
77 char *buf, *buf_align, *msg;
78 u32 csum;
79
80 addr = os_info_old->entry[nr].addr;
81 if (!addr) {
82 msg = "not available";
83 goto fail;
84 }
85 size = os_info_old->entry[nr].size;
86 buf = kmalloc(size + align - 1, GFP_KERNEL);
87 if (!buf) {
88 msg = "alloc failed";
89 goto fail;
90 }
91 buf_align = PTR_ALIGN(buf, align);
92 if (copy_from_oldmem(buf_align, (void *) addr, size)) {
93 msg = "copy failed";
94 goto fail_free;
95 }
96 csum = csum_partial(buf_align, size, 0);
97 if (csum != os_info_old->entry[nr].csum) {
98 msg = "checksum failed";
99 goto fail_free;
100 }
101 os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align;
102 msg = "copied";
103 goto out;
104fail_free:
105 kfree(buf);
106fail:
107 os_info_old->entry[nr].addr = 0;
108out:
109 pr_info("entry %i: %s (addr=0x%lx size=%lu)\n",
110 nr, msg, addr, size);
111}
112
113/*
114 * Initialize OS info and OS info entries from oldmem
115 */
116static void os_info_old_init(void)
117{
118 static int os_info_init;
119 unsigned long addr;
120
121 if (os_info_init)
122 return;
123 if (!OLDMEM_BASE)
124 goto fail;
125 if (copy_from_oldmem(&addr, &S390_lowcore.os_info, sizeof(addr)))
126 goto fail;
127 if (addr == 0 || addr % PAGE_SIZE)
128 goto fail;
129 os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
130 if (!os_info_old)
131 goto fail;
132 if (copy_from_oldmem(os_info_old, (void *) addr, sizeof(*os_info_old)))
133 goto fail_free;
134 if (os_info_old->magic != OS_INFO_MAGIC)
135 goto fail_free;
136 if (os_info_old->csum != os_info_csum(os_info_old))
137 goto fail_free;
138 if (os_info_old->version_major > OS_INFO_VERSION_MAJOR)
139 goto fail_free;
140 os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
141 os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
142 os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE);
143 pr_info("crashkernel: addr=0x%lx size=%lu\n",
144 (unsigned long) os_info_old->crashkernel_addr,
145 (unsigned long) os_info_old->crashkernel_size);
146 os_info_init = 1;
147 return;
148fail_free:
149 kfree(os_info_old);
150fail:
151 os_info_init = 1;
152 os_info_old = NULL;
153}
154
155/*
156 * Return pointer to OS info entry and its size
157 */
158void *os_info_old_entry(int nr, unsigned long *size)
159{
160 os_info_old_init();
161
162 if (!os_info_old)
163 return NULL;
164 if (!os_info_old->entry[nr].addr)
165 return NULL;
166 *size = (unsigned long) os_info_old->entry[nr].size;
167 return (void *)(unsigned long)os_info_old->entry[nr].addr;
168}
169#endif
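
A hedged usage sketch for the interface above; the entry index constant comes from asm/os_info.h and error handling is omitted:

    /* os_info_entry_add() runs in the current kernel,
     * os_info_old_entry() in the kdump kernel. */
    static void os_info_usage_sketch(void *buf, unsigned long len)
    {
        unsigned long size;
        void *old;

        os_info_entry_add(OS_INFO_VMCOREINFO, buf, len);

        /* NULL if the entry is absent or its checksum did not match. */
        old = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
        (void) old;
    }
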
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7618085b4164..3732e4c09cbe 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -77,13 +77,8 @@ static void default_idle(void)
77 local_irq_enable(); 77 local_irq_enable();
78 return; 78 return;
79 } 79 }
80 trace_hardirqs_on(); 80 /* Halt the cpu and keep track of cpu time accounting. */
81 /* Don't trace preempt off for idle. */
82 stop_critical_timings();
83 /* Stop virtual timer and halt the cpu. */
84 vtime_stop_cpu(); 81 vtime_stop_cpu();
85 /* Reenable preemption tracer. */
86 start_critical_timings();
87} 82}
88 83
89void cpu_idle(void) 84void cpu_idle(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3b2efc81f34e..38e751278bf7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/setup.c 2 * arch/s390/kernel/setup.c
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright (C) IBM Corp. 1999,2010 5 * Copyright (C) IBM Corp. 1999,2012
6 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * 8 *
@@ -62,6 +62,8 @@
62#include <asm/ebcdic.h> 62#include <asm/ebcdic.h>
63#include <asm/kvm_virtio.h> 63#include <asm/kvm_virtio.h>
64#include <asm/diag.h> 64#include <asm/diag.h>
65#include <asm/os_info.h>
66#include "entry.h"
65 67
66long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | 68long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
67 PSW_MASK_EA | PSW_MASK_BA; 69 PSW_MASK_EA | PSW_MASK_BA;
@@ -351,8 +353,9 @@ static void setup_addressing_mode(void)
351 } 353 }
352} 354}
353 355
354static void __init 356void *restart_stack __attribute__((__section__(".data")));
355setup_lowcore(void) 357
358static void __init setup_lowcore(void)
356{ 359{
357 struct _lowcore *lc; 360 struct _lowcore *lc;
358 361
@@ -363,7 +366,7 @@ setup_lowcore(void)
363 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 366 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
364 lc->restart_psw.mask = psw_kernel_bits; 367 lc->restart_psw.mask = psw_kernel_bits;
365 lc->restart_psw.addr = 368 lc->restart_psw.addr =
366 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 369 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
367 lc->external_new_psw.mask = psw_kernel_bits | 370 lc->external_new_psw.mask = psw_kernel_bits |
368 PSW_MASK_DAT | PSW_MASK_MCHECK; 371 PSW_MASK_DAT | PSW_MASK_MCHECK;
369 lc->external_new_psw.addr = 372 lc->external_new_psw.addr =
@@ -412,6 +415,24 @@ setup_lowcore(void)
412 lc->last_update_timer = S390_lowcore.last_update_timer; 415 lc->last_update_timer = S390_lowcore.last_update_timer;
413 lc->last_update_clock = S390_lowcore.last_update_clock; 416 lc->last_update_clock = S390_lowcore.last_update_clock;
414 lc->ftrace_func = S390_lowcore.ftrace_func; 417 lc->ftrace_func = S390_lowcore.ftrace_func;
418
419 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
420 restart_stack += ASYNC_SIZE;
421
422 /*
423 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
424 * restart data to the absolute zero lowcore. This is necessary if
425 * PSW restart is done on an offline CPU that has lowcore zero.
426 */
427 lc->restart_stack = (unsigned long) restart_stack;
428 lc->restart_fn = (unsigned long) do_restart;
429 lc->restart_data = 0;
430 lc->restart_source = -1UL;
431 memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
432 4*sizeof(unsigned long));
433 copy_to_absolute_zero(&S390_lowcore.restart_psw,
434 &lc->restart_psw, sizeof(psw_t));
435
415 set_prefix((u32)(unsigned long) lc); 436 set_prefix((u32)(unsigned long) lc);
416 lowcore_ptr[0] = lc; 437 lowcore_ptr[0] = lc;
417} 438}
@@ -572,27 +593,6 @@ static void __init setup_memory_end(void)
572 } 593 }
573} 594}
574 595
575void *restart_stack __attribute__((__section__(".data")));
576
577/*
578 * Setup new PSW and allocate stack for PSW restart interrupt
579 */
580static void __init setup_restart_psw(void)
581{
582 psw_t psw;
583
584 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
585 restart_stack += ASYNC_SIZE;
586
587 /*
588 * Setup restart PSW for absolute zero lowcore. This is necessary
589 * if PSW restart is done on an offline CPU that has lowcore zero
590 */
591 psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
592 psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
593 copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
594}
595
596static void __init setup_vmcoreinfo(void) 596static void __init setup_vmcoreinfo(void)
597{ 597{
598#ifdef CONFIG_KEXEC 598#ifdef CONFIG_KEXEC
@@ -747,7 +747,7 @@ static void __init reserve_crashkernel(void)
747{ 747{
748#ifdef CONFIG_CRASH_DUMP 748#ifdef CONFIG_CRASH_DUMP
749 unsigned long long crash_base, crash_size; 749 unsigned long long crash_base, crash_size;
750 char *msg; 750 char *msg = NULL;
751 int rc; 751 int rc;
752 752
753 rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, 753 rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
@@ -779,11 +779,11 @@ static void __init reserve_crashkernel(void)
779 pr_info("Reserving %lluMB of memory at %lluMB " 779 pr_info("Reserving %lluMB of memory at %lluMB "
780 "for crashkernel (System RAM: %luMB)\n", 780 "for crashkernel (System RAM: %luMB)\n",
781 crash_size >> 20, crash_base >> 20, memory_end >> 20); 781 crash_size >> 20, crash_base >> 20, memory_end >> 20);
782 os_info_crashkernel_add(crash_base, crash_size);
782#endif 783#endif
783} 784}
784 785
785static void __init 786static void __init setup_memory(void)
786setup_memory(void)
787{ 787{
788 unsigned long bootmap_size; 788 unsigned long bootmap_size;
789 unsigned long start_pfn, end_pfn; 789 unsigned long start_pfn, end_pfn;
@@ -1014,8 +1014,7 @@ static void __init setup_hwcaps(void)
1014 * was printed. 1014 * was printed.
1015 */ 1015 */
1016 1016
1017void __init 1017void __init setup_arch(char **cmdline_p)
1018setup_arch(char **cmdline_p)
1019{ 1018{
1020 /* 1019 /*
1021 * print what head.S has found out about the machine 1020 * print what head.S has found out about the machine
@@ -1060,6 +1059,7 @@ setup_arch(char **cmdline_p)
1060 1059
1061 parse_early_param(); 1060 parse_early_param();
1062 1061
1062 os_info_init();
1063 setup_ipl(); 1063 setup_ipl();
1064 setup_memory_end(); 1064 setup_memory_end();
1065 setup_addressing_mode(); 1065 setup_addressing_mode();
@@ -1068,7 +1068,6 @@ setup_arch(char **cmdline_p)
1068 setup_memory(); 1068 setup_memory();
1069 setup_resources(); 1069 setup_resources();
1070 setup_vmcoreinfo(); 1070 setup_vmcoreinfo();
1071 setup_restart_psw();
1072 setup_lowcore(); 1071 setup_lowcore();
1073 1072
1074 cpu_init(); 1073 cpu_init();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 2d421d90fada..f29f5ef400e5 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -384,7 +384,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
384 siginfo_t *info, sigset_t *oldset, 384 siginfo_t *info, sigset_t *oldset,
385 struct pt_regs *regs) 385 struct pt_regs *regs)
386{ 386{
387 sigset_t blocked;
388 int ret; 387 int ret;
389 388
390 /* Set up the stack frame */ 389 /* Set up the stack frame */
@@ -394,10 +393,7 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
394 ret = setup_frame(sig, ka, oldset, regs); 393 ret = setup_frame(sig, ka, oldset, regs);
395 if (ret) 394 if (ret)
396 return ret; 395 return ret;
397 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); 396 block_sigmask(ka, sig);
398 if (!(ka->sa.sa_flags & SA_NODEFER))
399 sigaddset(&blocked, sig);
400 set_current_blocked(&blocked);
401 return 0; 397 return 0;
402} 398}
403 399
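
For reference, the generic block_sigmask() helper performs exactly the steps removed here; a hedged paraphrase:

    static void block_sigmask_sketch(struct k_sigaction *ka, int sig)
    {
        sigset_t blocked;

        sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
            sigaddset(&blocked, sig);
        set_current_blocked(&blocked);
    }
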
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b0e28c47ab83..a8bf9994b086 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,23 +1,18 @@
1/* 1/*
2 * arch/s390/kernel/smp.c 2 * SMP related functions
3 * 3 *
4 * Copyright IBM Corp. 1999, 2009 4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens (heiko.carstens@de.ibm.com) 7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 * 8 *
9 * based on other smp stuff by 9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar 11 * (c) 1998 Ingo Molnar
12 * 12 *
13 * We work with logical cpu numbering everywhere we can. The only 13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * functions using the real cpu address (got from STAP) are the sigp 14 * the translation of logical to physical cpu ids. All new code that
15 * functions. For all other functions we use the identity mapping. 15 * operates on physical cpu numbers needs to go into smp.c.
16 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
17 * used e.g. to find the idle task belonging to a logical cpu. Every array
18 * in the kernel is sorted by the logical cpu number and not by the physical
19 * one which is causing all the confusion with __cpu_logical_map and
20 * cpu_number_map in other architectures.
21 */ 16 */
22 17
23#define KMSG_COMPONENT "cpu" 18#define KMSG_COMPONENT "cpu"
@@ -31,198 +26,433 @@
31#include <linux/spinlock.h> 26#include <linux/spinlock.h>
32#include <linux/kernel_stat.h> 27#include <linux/kernel_stat.h>
33#include <linux/delay.h> 28#include <linux/delay.h>
34#include <linux/cache.h>
35#include <linux/interrupt.h> 29#include <linux/interrupt.h>
36#include <linux/irqflags.h> 30#include <linux/irqflags.h>
37#include <linux/cpu.h> 31#include <linux/cpu.h>
38#include <linux/timex.h>
39#include <linux/bootmem.h>
40#include <linux/slab.h> 32#include <linux/slab.h>
41#include <linux/crash_dump.h> 33#include <linux/crash_dump.h>
42#include <asm/asm-offsets.h> 34#include <asm/asm-offsets.h>
43#include <asm/ipl.h> 35#include <asm/ipl.h>
44#include <asm/setup.h> 36#include <asm/setup.h>
45#include <asm/sigp.h>
46#include <asm/pgalloc.h>
47#include <asm/irq.h> 37#include <asm/irq.h>
48#include <asm/cpcmd.h>
49#include <asm/tlbflush.h> 38#include <asm/tlbflush.h>
50#include <asm/timer.h> 39#include <asm/timer.h>
51#include <asm/lowcore.h> 40#include <asm/lowcore.h>
52#include <asm/sclp.h> 41#include <asm/sclp.h>
53#include <asm/cputime.h>
54#include <asm/vdso.h> 42#include <asm/vdso.h>
55#include <asm/cpu.h> 43#include <asm/debug.h>
44#include <asm/os_info.h>
56#include "entry.h" 45#include "entry.h"
57 46
58/* logical cpu to cpu address */ 47enum {
59unsigned short __cpu_logical_map[NR_CPUS]; 48 sigp_sense = 1,
49 sigp_external_call = 2,
50 sigp_emergency_signal = 3,
51 sigp_start = 4,
52 sigp_stop = 5,
53 sigp_restart = 6,
54 sigp_stop_and_store_status = 9,
55 sigp_initial_cpu_reset = 11,
56 sigp_cpu_reset = 12,
57 sigp_set_prefix = 13,
58 sigp_store_status_at_address = 14,
59 sigp_store_extended_status_at_address = 15,
60 sigp_set_architecture = 18,
61 sigp_conditional_emergency_signal = 19,
62 sigp_sense_running = 21,
63};
60 64
61static struct task_struct *current_set[NR_CPUS]; 65enum {
66 sigp_order_code_accepted = 0,
67 sigp_status_stored = 1,
68 sigp_busy = 2,
69 sigp_not_operational = 3,
70};
62 71
63static u8 smp_cpu_type; 72enum {
64static int smp_use_sigp_detection; 73 ec_schedule = 0,
74 ec_call_function,
75 ec_call_function_single,
76 ec_stop_cpu,
77};
65 78
66enum s390_cpu_state { 79enum {
67 CPU_STATE_STANDBY, 80 CPU_STATE_STANDBY,
68 CPU_STATE_CONFIGURED, 81 CPU_STATE_CONFIGURED,
69}; 82};
70 83
84struct pcpu {
85 struct cpu cpu;
86 struct task_struct *idle; /* idle process for the cpu */
87 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
88 unsigned long async_stack; /* async stack for the cpu */
89 unsigned long panic_stack; /* panic stack for the cpu */
90 unsigned long ec_mask; /* bit mask for ec_xxx functions */
91 int state; /* physical cpu state */
92 u32 status; /* last status received via sigp */
93 u16 address; /* physical cpu address */
94};
95
96static u8 boot_cpu_type;
97static u16 boot_cpu_address;
98static struct pcpu pcpu_devices[NR_CPUS];
99
71DEFINE_MUTEX(smp_cpu_state_mutex); 100DEFINE_MUTEX(smp_cpu_state_mutex);
72static int smp_cpu_state[NR_CPUS];
73 101
74static DEFINE_PER_CPU(struct cpu, cpu_devices); 102/*
103 * Signal processor helper functions.
104 */
105static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
106{
107 register unsigned int reg1 asm ("1") = parm;
108 int cc;
75 109
76static void smp_ext_bitcall(int, int); 110 asm volatile(
111 " sigp %1,%2,0(%3)\n"
112 " ipm %0\n"
113 " srl %0,28\n"
114 : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
115 if (status && cc == 1)
116 *status = reg1;
117 return cc;
118}
77 119
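
(__pcpu_sigp() returns the SIGP condition code; on cc 1, "status stored", the status word lands in *status. pcpu_stopped() and pcpu_running() below are the canonical users, testing 0x50 (stopped/check-stop) and 0x400 (not running) in the status word.)
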
78static int raw_cpu_stopped(int cpu) 120static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
79{ 121{
80 u32 status; 122 int cc;
81 123
82 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { 124 while (1) {
83 case sigp_status_stored: 125 cc = __pcpu_sigp(addr, order, parm, status);
84 /* Check for stopped and check stop state */ 126 if (cc != sigp_busy)
85 if (status & 0x50) 127 return cc;
86 return 1; 128 cpu_relax();
87 break;
88 default:
89 break;
90 } 129 }
91 return 0;
92} 130}
93 131
94static inline int cpu_stopped(int cpu) 132static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
133{
134 int cc, retry;
135
136 for (retry = 0; ; retry++) {
137 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
138 if (cc != sigp_busy)
139 break;
140 if (retry >= 3)
141 udelay(10);
142 }
143 return cc;
144}
145
146static inline int pcpu_stopped(struct pcpu *pcpu)
147{
148 if (__pcpu_sigp(pcpu->address, sigp_sense,
149 0, &pcpu->status) != sigp_status_stored)
150 return 0;
151 /* Check for stopped and check stop state */
152 return !!(pcpu->status & 0x50);
153}
154
155static inline int pcpu_running(struct pcpu *pcpu)
95{ 156{
96 return raw_cpu_stopped(cpu_logical_map(cpu)); 157 if (__pcpu_sigp(pcpu->address, sigp_sense_running,
158 0, &pcpu->status) != sigp_status_stored)
159 return 1;
160 /* Check for running status */
161 return !(pcpu->status & 0x400);
97} 162}
98 163
99/* 164/*
100 * Ensure that PSW restart is done on an online CPU 165 * Find struct pcpu by cpu address.
101 */ 166 */
102void smp_restart_with_online_cpu(void) 167static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
103{ 168{
104 int cpu; 169 int cpu;
105 170
106 for_each_online_cpu(cpu) { 171 for_each_cpu(cpu, mask)
107 if (stap() == __cpu_logical_map[cpu]) { 172 if (pcpu_devices[cpu].address == address)
108 /* We are online: Enable DAT again and return */ 173 return pcpu_devices + cpu;
109 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 174 return NULL;
110 return; 175}
111 } 176
177static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
178{
179 int order;
180
181 set_bit(ec_bit, &pcpu->ec_mask);
182 order = pcpu_running(pcpu) ?
183 sigp_external_call : sigp_emergency_signal;
184 pcpu_sigp_retry(pcpu, order, 0);
185}
186
187static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
188{
189 struct _lowcore *lc;
190
191 if (pcpu != &pcpu_devices[0]) {
192 pcpu->lowcore = (struct _lowcore *)
193 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
194 pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
195 pcpu->panic_stack = __get_free_page(GFP_KERNEL);
196 if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
197 goto out;
112 } 198 }
113 /* We are not online: Do PSW restart on an online CPU */ 199 lc = pcpu->lowcore;
114 while (sigp(cpu, sigp_restart) == sigp_busy) 200 memcpy(lc, &S390_lowcore, 512);
115 cpu_relax(); 201 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
116 /* And stop ourself */ 202 lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
117 while (raw_sigp(stap(), sigp_stop) == sigp_busy) 203 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
118 cpu_relax(); 204 lc->cpu_nr = cpu;
119 for (;;); 205#ifndef CONFIG_64BIT
206 if (MACHINE_HAS_IEEE) {
207 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
208 if (!lc->extended_save_area_addr)
209 goto out;
210 }
211#else
212 if (vdso_alloc_per_cpu(lc))
213 goto out;
214#endif
215 lowcore_ptr[cpu] = lc;
216 pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
217 return 0;
218out:
219 if (pcpu != &pcpu_devices[0]) {
220 free_page(pcpu->panic_stack);
221 free_pages(pcpu->async_stack, ASYNC_ORDER);
222 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
223 }
224 return -ENOMEM;
120} 225}
121 226
122void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) 227static void pcpu_free_lowcore(struct pcpu *pcpu)
123{ 228{
124 struct _lowcore *lc, *current_lc; 229 pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
125 struct stack_frame *sf; 230 lowcore_ptr[pcpu - pcpu_devices] = NULL;
126 struct pt_regs *regs; 231#ifndef CONFIG_64BIT
127 unsigned long sp; 232 if (MACHINE_HAS_IEEE) {
128 233 struct _lowcore *lc = pcpu->lowcore;
129 if (smp_processor_id() == 0) 234
130 func(data); 235 free_page((unsigned long) lc->extended_save_area_addr);
131 __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | 236 lc->extended_save_area_addr = 0;
132 PSW_MASK_EA | PSW_MASK_BA); 237 }
133 /* Disable lowcore protection */ 238#else
134 __ctl_clear_bit(0, 28); 239 vdso_free_per_cpu(pcpu->lowcore);
135 current_lc = lowcore_ptr[smp_processor_id()]; 240#endif
136 lc = lowcore_ptr[0]; 241 if (pcpu != &pcpu_devices[0]) {
137 if (!lc) 242 free_page(pcpu->panic_stack);
138 lc = current_lc; 243 free_pages(pcpu->async_stack, ASYNC_ORDER);
139 lc->restart_psw.mask = 244 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
140 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; 245 }
141 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; 246}
142 if (!cpu_online(0)) 247
143 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); 248static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
144 while (sigp(0, sigp_stop_and_store_status) == sigp_busy) 249{
145 cpu_relax(); 250 struct _lowcore *lc = pcpu->lowcore;
146 sp = lc->panic_stack; 251
147 sp -= sizeof(struct pt_regs); 252 atomic_inc(&init_mm.context.attach_count);
148 regs = (struct pt_regs *) sp; 253 lc->cpu_nr = cpu;
149 memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs)); 254 lc->percpu_offset = __per_cpu_offset[cpu];
150 regs->psw = current_lc->psw_save_area; 255 lc->kernel_asce = S390_lowcore.kernel_asce;
151 sp -= STACK_FRAME_OVERHEAD; 256 lc->machine_flags = S390_lowcore.machine_flags;
152 sf = (struct stack_frame *) sp; 257 lc->ftrace_func = S390_lowcore.ftrace_func;
153 sf->back_chain = 0; 258 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
154 smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); 259 __ctl_store(lc->cregs_save_area, 0, 15);
260 save_access_regs((unsigned int *) lc->access_regs_save_area);
261 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
262 MAX_FACILITY_BIT/8);
263}
264
265static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
266{
267 struct _lowcore *lc = pcpu->lowcore;
268 struct thread_info *ti = task_thread_info(tsk);
269
270 lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
271 lc->thread_info = (unsigned long) task_thread_info(tsk);
272 lc->current_task = (unsigned long) tsk;
273 lc->user_timer = ti->user_timer;
274 lc->system_timer = ti->system_timer;
275 lc->steal_timer = 0;
276}
277
278static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
279{
280 struct _lowcore *lc = pcpu->lowcore;
281
282 lc->restart_stack = lc->kernel_stack;
283 lc->restart_fn = (unsigned long) func;
284 lc->restart_data = (unsigned long) data;
285 lc->restart_source = -1UL;
286 pcpu_sigp_retry(pcpu, sigp_restart, 0);
287}
288
289/*
290 * Call function via PSW restart on pcpu and stop the current cpu.
291 */
292static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
293 void *data, unsigned long stack)
294{
295 struct _lowcore *lc = pcpu->lowcore;
296 unsigned short this_cpu;
297
298 __load_psw_mask(psw_kernel_bits);
299 this_cpu = stap();
300 if (pcpu->address == this_cpu)
301 func(data); /* should not return */
302 /* Stop target cpu (if func returns this stops the current cpu). */
303 pcpu_sigp_retry(pcpu, sigp_stop, 0);
304 /* Restart func on the target cpu and stop the current cpu. */
305 lc->restart_stack = stack;
306 lc->restart_fn = (unsigned long) func;
307 lc->restart_data = (unsigned long) data;
308 lc->restart_source = (unsigned long) this_cpu;
309 asm volatile(
310 "0: sigp 0,%0,6 # sigp restart to target cpu\n"
311 " brc 2,0b # busy, try again\n"
312 "1: sigp 0,%1,5 # sigp stop to current cpu\n"
313 " brc 2,1b # busy, try again\n"
314 : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
315 for (;;) ;
316}
317
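Both the pcpu_sigp_retry() helper used above and the "brc 2,..." loops in the inline assembly implement the same idiom: reissue a sigp order while the target answers condition code 2 (busy). A minimal user-space sketch with a stubbed sigp; fake_sigp and its behavior are illustrative only:

#include <stdio.h>

enum { cc_accepted = 0, cc_busy = 2 };

/* Stand-in for the sigp instruction: report "busy" twice, then accept. */
static int fake_sigp(unsigned short address, int order)
{
        static int busy_left = 2;
        (void) address;
        (void) order;
        return busy_left-- > 0 ? cc_busy : cc_accepted;
}

/* Retry while condition code 2 (busy), the "brc 2,0b" of the assembly. */
static int sigp_retry(unsigned short address, int order)
{
        int cc;

        do
                cc = fake_sigp(address, order);
        while (cc == cc_busy);
        return cc;
}

int main(void)
{
        printf("cc=%d\n", sigp_retry(0, 6 /* restart */));
        return 0;
}
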
318/*
319 * Call function on an online CPU.
320 */
321void smp_call_online_cpu(void (*func)(void *), void *data)
322{
323 struct pcpu *pcpu;
324
325 /* Use the current cpu if it is online. */
326 pcpu = pcpu_find_address(cpu_online_mask, stap());
327 if (!pcpu)
328 /* Use the first online cpu. */
329 pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
330 pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
331}
332
333/*
334 * Call function on the ipl CPU.
335 */
336void smp_call_ipl_cpu(void (*func)(void *), void *data)
337{
338 pcpu_delegate(&pcpu_devices[0], func, data,
339 pcpu_devices->panic_stack + PAGE_SIZE);
340}
341
342int smp_find_processor_id(u16 address)
343{
344 int cpu;
345
346 for_each_present_cpu(cpu)
347 if (pcpu_devices[cpu].address == address)
348 return cpu;
349 return -1;
350}
351
352int smp_vcpu_scheduled(int cpu)
353{
354 return pcpu_running(pcpu_devices + cpu);
355}
356
357void smp_yield(void)
358{
359 if (MACHINE_HAS_DIAG44)
360 asm volatile("diag 0,0,0x44");
155} 361}
156 362
157static void smp_stop_cpu(void) 363void smp_yield_cpu(int cpu)
158{ 364{
159 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 365 if (MACHINE_HAS_DIAG9C)
366 asm volatile("diag %0,0,0x9c"
367 : : "d" (pcpu_devices[cpu].address));
368 else if (MACHINE_HAS_DIAG44)
369 asm volatile("diag 0,0,0x44");
370}
371
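For readers unfamiliar with the diagnoses: diag 0x44 is an undirected yield hint to the hypervisor, diag 0x9c a directed one naming the cpu being waited for. A rough user-space analogue of the undirected case, using POSIX sched_yield() as the stand-in:

#include <sched.h>
#include <stdio.h>

int main(void)
{
        /* User-space cousin of diag 0x44: hint to the scheduler (there:
         * the hypervisor) that this cpu has nothing useful to do. diag
         * 0x9c is the directed form; sched_yield() has no directed
         * counterpart. */
        if (sched_yield() != 0)
                perror("sched_yield");
        return 0;
}
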
372/*
373 * Send cpus emergency shutdown signal. This gives the cpus the
374 * opportunity to complete outstanding interrupts.
375 */
376void smp_emergency_stop(cpumask_t *cpumask)
377{
378 u64 end;
379 int cpu;
380
381 end = get_clock() + (1000000UL << 12);
382 for_each_cpu(cpu, cpumask) {
383 struct pcpu *pcpu = pcpu_devices + cpu;
384 set_bit(ec_stop_cpu, &pcpu->ec_mask);
385 while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
386 0, NULL) == sigp_busy &&
387 get_clock() < end)
388 cpu_relax();
389 }
390 while (get_clock() < end) {
391 for_each_cpu(cpu, cpumask)
392 if (pcpu_stopped(pcpu_devices + cpu))
393 cpumask_clear_cpu(cpu, cpumask);
394 if (cpumask_empty(cpumask))
395 break;
160 cpu_relax(); 396 cpu_relax();
397 }
161} 398}
162 399
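The timeout above, get_clock() + (1000000UL << 12), is one second: bit 51 of the s390 TOD clock ticks once per microsecond, so a microsecond count shifted left by 12 yields TOD units. A self-contained sketch of the conversion and of the deadline-loop shape (the simulated clock is obviously not get_clock()):

#include <stdint.h>
#include <stdio.h>

/* Microseconds -> TOD clock units: TOD bit 51 is one microsecond. */
static uint64_t tod_from_usecs(uint64_t usecs)
{
        return usecs << 12;
}

int main(void)
{
        uint64_t clock = 0;                       /* stand-in for get_clock() */
        uint64_t end = clock + tod_from_usecs(1000000);   /* one second */

        /* Deadline-loop shape used by smp_emergency_stop(). */
        while (clock < end)
                clock += tod_from_usecs(100000);  /* pretend time passes */
        printf("deadline reached at %llu TOD units\n",
               (unsigned long long) clock);
        return 0;
}
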
400/*
401 * Stop all cpus but the current one.
402 */
163void smp_send_stop(void) 403void smp_send_stop(void)
164{ 404{
165 cpumask_t cpumask; 405 cpumask_t cpumask;
166 int cpu; 406 int cpu;
167 u64 end;
168 407
169 /* Disable all interrupts/machine checks */ 408 /* Disable all interrupts/machine checks */
170 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 409 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
171 trace_hardirqs_off(); 410 trace_hardirqs_off();
172 411
412 debug_set_critical();
173 cpumask_copy(&cpumask, cpu_online_mask); 413 cpumask_copy(&cpumask, cpu_online_mask);
174 cpumask_clear_cpu(smp_processor_id(), &cpumask); 414 cpumask_clear_cpu(smp_processor_id(), &cpumask);
175 415
176 if (oops_in_progress) { 416 if (oops_in_progress)
177 /* 417 smp_emergency_stop(&cpumask);
178 * Give the other cpus the opportunity to complete
179 * outstanding interrupts before stopping them.
180 */
181 end = get_clock() + (1000000UL << 12);
182 for_each_cpu(cpu, &cpumask) {
183 set_bit(ec_stop_cpu, (unsigned long *)
184 &lowcore_ptr[cpu]->ext_call_fast);
185 while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
186 get_clock() < end)
187 cpu_relax();
188 }
189 while (get_clock() < end) {
190 for_each_cpu(cpu, &cpumask)
191 if (cpu_stopped(cpu))
192 cpumask_clear_cpu(cpu, &cpumask);
193 if (cpumask_empty(&cpumask))
194 break;
195 cpu_relax();
196 }
197 }
198 418
199 /* stop all processors */ 419 /* stop all processors */
200 for_each_cpu(cpu, &cpumask) { 420 for_each_cpu(cpu, &cpumask) {
201 while (sigp(cpu, sigp_stop) == sigp_busy) 421 struct pcpu *pcpu = pcpu_devices + cpu;
202 cpu_relax(); 422 pcpu_sigp_retry(pcpu, sigp_stop, 0);
203 while (!cpu_stopped(cpu)) 423 while (!pcpu_stopped(pcpu))
204 cpu_relax(); 424 cpu_relax();
205 } 425 }
206} 426}
207 427
208/* 428/*
429 * Stop the current cpu.
430 */
431void smp_stop_cpu(void)
432{
433 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
434 for (;;) ;
435}
436
437/*
209 * This is the main routine where commands issued by other 438 * This is the main routine where commands issued by other
210 * cpus are handled. 439 * cpus are handled.
211 */ 440 */
212 441static void do_ext_call_interrupt(struct ext_code ext_code,
213static void do_ext_call_interrupt(unsigned int ext_int_code,
214 unsigned int param32, unsigned long param64) 442 unsigned int param32, unsigned long param64)
215{ 443{
216 unsigned long bits; 444 unsigned long bits;
445 int cpu;
217 446
218 if ((ext_int_code & 0xffff) == 0x1202) 447 cpu = smp_processor_id();
219 kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; 448 if (ext_code.code == 0x1202)
449 kstat_cpu(cpu).irqs[EXTINT_EXC]++;
220 else 450 else
221 kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; 451 kstat_cpu(cpu).irqs[EXTINT_EMS]++;
222 /* 452 /*
223 * handle bit signal external calls 453 * handle bit signal external calls
224 */ 454 */
225 bits = xchg(&S390_lowcore.ext_call_fast, 0); 455 bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
226 456
227 if (test_bit(ec_stop_cpu, &bits)) 457 if (test_bit(ec_stop_cpu, &bits))
228 smp_stop_cpu(); 458 smp_stop_cpu();
@@ -238,38 +468,17 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
238 468
239} 469}
240 470
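The receive path above drains all pending requests with one atomic exchange. A user-space sketch of the same produce/consume pattern using C11 atomics; the ec_* names mirror the patch, but their numeric values here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

enum { ec_schedule = 0, ec_call_function, ec_call_function_single, ec_stop_cpu };

int main(void)
{
        _Atomic unsigned long ec_mask = 0;

        /* Sender: set the request bit, then kick the target with a sigp. */
        atomic_fetch_or(&ec_mask, 1UL << ec_schedule);
        atomic_fetch_or(&ec_mask, 1UL << ec_stop_cpu);

        /* Receiver: fetch and clear all pending bits in one atomic step,
         * mirroring bits = xchg(&pcpu_devices[cpu].ec_mask, 0). */
        unsigned long bits = atomic_exchange(&ec_mask, 0);

        if (bits & (1UL << ec_stop_cpu))
                printf("stop requested\n");
        if (bits & (1UL << ec_schedule))
                printf("reschedule requested\n");
        return 0;
}
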
241/*
242 * Send an external call sigp to another cpu and return without waiting
243 * for its completion.
244 */
245static void smp_ext_bitcall(int cpu, int sig)
246{
247 int order;
248
249 /*
250 * Set signaling bit in lowcore of target cpu and kick it
251 */
252 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
253 while (1) {
254 order = smp_vcpu_scheduled(cpu) ?
255 sigp_external_call : sigp_emergency_signal;
256 if (sigp(cpu, order) != sigp_busy)
257 break;
258 udelay(10);
259 }
260}
261
262void arch_send_call_function_ipi_mask(const struct cpumask *mask) 471void arch_send_call_function_ipi_mask(const struct cpumask *mask)
263{ 472{
264 int cpu; 473 int cpu;
265 474
266 for_each_cpu(cpu, mask) 475 for_each_cpu(cpu, mask)
267 smp_ext_bitcall(cpu, ec_call_function); 476 pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
268} 477}
269 478
270void arch_send_call_function_single_ipi(int cpu) 479void arch_send_call_function_single_ipi(int cpu)
271{ 480{
272 smp_ext_bitcall(cpu, ec_call_function_single); 481 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
273} 482}
274 483
275#ifndef CONFIG_64BIT 484#ifndef CONFIG_64BIT
@@ -295,15 +504,16 @@ EXPORT_SYMBOL(smp_ptlb_all);
295 */ 504 */
296void smp_send_reschedule(int cpu) 505void smp_send_reschedule(int cpu)
297{ 506{
298 smp_ext_bitcall(cpu, ec_schedule); 507 pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
299} 508}
300 509
301/* 510/*
302 * parameter area for the set/clear control bit callbacks 511 * parameter area for the set/clear control bit callbacks
303 */ 512 */
304struct ec_creg_mask_parms { 513struct ec_creg_mask_parms {
305 unsigned long orvals[16]; 514 unsigned long orval;
306 unsigned long andvals[16]; 515 unsigned long andval;
516 int cr;
307}; 517};
308 518
309/* 519/*
@@ -313,11 +523,9 @@ static void smp_ctl_bit_callback(void *info)
313{ 523{
314 struct ec_creg_mask_parms *pp = info; 524 struct ec_creg_mask_parms *pp = info;
315 unsigned long cregs[16]; 525 unsigned long cregs[16];
316 int i;
317 526
318 __ctl_store(cregs, 0, 15); 527 __ctl_store(cregs, 0, 15);
319 for (i = 0; i <= 15; i++) 528 cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
320 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
321 __ctl_load(cregs, 0, 15); 529 __ctl_load(cregs, 0, 15);
322} 530}
323 531
@@ -326,11 +534,8 @@ static void smp_ctl_bit_callback(void *info)
326 */ 534 */
327void smp_ctl_set_bit(int cr, int bit) 535void smp_ctl_set_bit(int cr, int bit)
328{ 536{
329 struct ec_creg_mask_parms parms; 537 struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
330 538
331 memset(&parms.orvals, 0, sizeof(parms.orvals));
332 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
333 parms.orvals[cr] = 1UL << bit;
334 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 539 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
335} 540}
336EXPORT_SYMBOL(smp_ctl_set_bit); 541EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -340,220 +545,178 @@ EXPORT_SYMBOL(smp_ctl_set_bit);
340 */ 545 */
341void smp_ctl_clear_bit(int cr, int bit) 546void smp_ctl_clear_bit(int cr, int bit)
342{ 547{
343 struct ec_creg_mask_parms parms; 548 struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
344 549
345 memset(&parms.orvals, 0, sizeof(parms.orvals));
346 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
347 parms.andvals[cr] = ~(1UL << bit);
348 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 550 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
349} 551}
350EXPORT_SYMBOL(smp_ctl_clear_bit); 552EXPORT_SYMBOL(smp_ctl_clear_bit);
351 553
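The shrunken parameter block carries one or-mask, one and-mask and a register index, and the callback applies a plain read-modify-write. A stand-alone sketch of that update, reusing the initializer shapes from smp_ctl_set_bit()/smp_ctl_clear_bit() above:

#include <stdio.h>

struct ec_creg_mask_parms {
        unsigned long orval;
        unsigned long andval;
        int cr;
};

/* Same and/or update smp_ctl_bit_callback() applies to one register. */
static void apply(unsigned long *cregs, const struct ec_creg_mask_parms *pp)
{
        cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
}

int main(void)
{
        unsigned long cregs[16] = { 0 };
        struct ec_creg_mask_parms set = { 1UL << 28, -1UL, 0 };
        struct ec_creg_mask_parms clear = { 0, ~(1UL << 28), 0 };

        apply(cregs, &set);
        printf("cr0 after set:   %#lx\n", cregs[0]);
        apply(cregs, &clear);
        printf("cr0 after clear: %#lx\n", cregs[0]);
        return 0;
}
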
352#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) 554#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
353 555
354static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 556struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
557EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
558
559static void __init smp_get_save_area(int cpu, u16 address)
355{ 560{
356 if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) 561 void *lc = pcpu_devices[0].lowcore;
357 return; 562 struct save_area *save_area;
563
358 if (is_kdump_kernel()) 564 if (is_kdump_kernel())
359 return; 565 return;
566 if (!OLDMEM_BASE && (address == boot_cpu_address ||
567 ipl_info.type != IPL_TYPE_FCP_DUMP))
568 return;
360 if (cpu >= NR_CPUS) { 569 if (cpu >= NR_CPUS) {
361 pr_warning("CPU %i exceeds the maximum %i and is excluded from " 570 pr_warning("CPU %i exceeds the maximum %i and is excluded "
362 "the dump\n", cpu, NR_CPUS - 1); 571 "from the dump\n", cpu, NR_CPUS - 1);
363 return; 572 return;
364 } 573 }
365 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); 574 save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
366 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) 575 if (!save_area)
367 cpu_relax(); 576 panic("could not allocate memory for save area\n");
368 memcpy_real(zfcpdump_save_areas[cpu], 577 zfcpdump_save_areas[cpu] = save_area;
369 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 578#ifdef CONFIG_CRASH_DUMP
370 sizeof(struct save_area)); 579 if (address == boot_cpu_address) {
580 /* Copy the registers of the boot cpu. */
581 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
582 SAVE_AREA_BASE - PAGE_SIZE, 0);
583 return;
584 }
585#endif
586 /* Get the registers of a non-boot cpu. */
587 __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
588 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
371} 589}
372 590
373struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 591int smp_store_status(int cpu)
374EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
375
376#else
377
378static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
379
380#endif /* CONFIG_ZFCPDUMP */
381
382static int cpu_known(int cpu_id)
383{ 592{
384 int cpu; 593 struct pcpu *pcpu;
385 594
386 for_each_present_cpu(cpu) { 595 pcpu = pcpu_devices + cpu;
387 if (__cpu_logical_map[cpu] == cpu_id) 596 if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
388 return 1; 597 0, NULL) != sigp_order_code_accepted)
389 } 598 return -EIO;
390 return 0; 599 return 0;
391} 600}
392 601
393static int smp_rescan_cpus_sigp(cpumask_t avail) 602#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
394{
395 int cpu_id, logical_cpu;
396 603
397 logical_cpu = cpumask_first(&avail); 604static inline void smp_get_save_area(int cpu, u16 address) { }
398 if (logical_cpu >= nr_cpu_ids)
399 return 0;
400 for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
401 if (cpu_known(cpu_id))
402 continue;
403 __cpu_logical_map[logical_cpu] = cpu_id;
404 cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
405 if (!cpu_stopped(logical_cpu))
406 continue;
407 set_cpu_present(logical_cpu, true);
408 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
409 logical_cpu = cpumask_next(logical_cpu, &avail);
410 if (logical_cpu >= nr_cpu_ids)
411 break;
412 }
413 return 0;
414}
415 605
416static int smp_rescan_cpus_sclp(cpumask_t avail) 606#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
607
608static struct sclp_cpu_info *smp_get_cpu_info(void)
417{ 609{
610 static int use_sigp_detection;
418 struct sclp_cpu_info *info; 611 struct sclp_cpu_info *info;
419 int cpu_id, logical_cpu, cpu; 612 int address;
420 int rc; 613
421 614 info = kzalloc(sizeof(*info), GFP_KERNEL);
422 logical_cpu = cpumask_first(&avail); 615 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
423 if (logical_cpu >= nr_cpu_ids) 616 use_sigp_detection = 1;
424 return 0; 617 for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
425 info = kmalloc(sizeof(*info), GFP_KERNEL); 618 if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
426 if (!info) 619 sigp_not_operational)
427 return -ENOMEM; 620 continue;
428 rc = sclp_get_cpu_info(info); 621 info->cpu[info->configured].address = address;
429 if (rc) 622 info->configured++;
430 goto out; 623 }
431 for (cpu = 0; cpu < info->combined; cpu++) { 624 info->combined = info->configured;
432 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
433 continue;
434 cpu_id = info->cpu[cpu].address;
435 if (cpu_known(cpu_id))
436 continue;
437 __cpu_logical_map[logical_cpu] = cpu_id;
438 cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
439 set_cpu_present(logical_cpu, true);
440 if (cpu >= info->configured)
441 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
442 else
443 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
444 logical_cpu = cpumask_next(logical_cpu, &avail);
445 if (logical_cpu >= nr_cpu_ids)
446 break;
447 } 625 }
448out: 626 return info;
449 kfree(info);
450 return rc;
451} 627}
452 628
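When sclp_get_cpu_info() fails, the code falls back to brute-force sigp sense probing of every possible cpu address. A compact model of that scan with a stubbed sense instruction; fake_sense and the tiny address ceiling exist only for the sketch:

#include <stdio.h>

#define MAX_CPU_ADDRESS 7   /* tiny ceiling for the sketch */

enum { sigp_order_code_accepted = 0, sigp_not_operational = 3 };

/* Stand-in for sigp sense: pretend only even addresses have hardware. */
static int fake_sense(int address)
{
        return (address & 1) ? sigp_not_operational : sigp_order_code_accepted;
}

int main(void)
{
        int address, configured = 0;

        /* Probe every possible cpu address and record the ones that
         * answer, as smp_get_cpu_info() does when sclp is unavailable. */
        for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
                if (fake_sense(address) == sigp_not_operational)
                        continue;
                printf("cpu[%d].address = %d\n", configured++, address);
        }
        printf("configured = combined = %d\n", configured);
        return 0;
}
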
453static int __smp_rescan_cpus(void) 629static int __devinit smp_add_present_cpu(int cpu);
630
631static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
632 int sysfs_add)
454{ 633{
634 struct pcpu *pcpu;
455 cpumask_t avail; 635 cpumask_t avail;
636 int cpu, nr, i;
456 637
638 nr = 0;
457 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); 639 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
458 if (smp_use_sigp_detection) 640 cpu = cpumask_first(&avail);
459 return smp_rescan_cpus_sigp(avail); 641 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
460 else 642 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
461 return smp_rescan_cpus_sclp(avail); 643 continue;
644 if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
645 continue;
646 pcpu = pcpu_devices + cpu;
647 pcpu->address = info->cpu[i].address;
648 pcpu->state = (cpu >= info->configured) ?
649 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
650 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
651 set_cpu_present(cpu, true);
652 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
653 set_cpu_present(cpu, false);
654 else
655 nr++;
656 cpu = cpumask_next(cpu, &avail);
657 }
658 return nr;
462} 659}
463 660
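The rescan starts from cpumask_xor(possible, present); since the present mask is a subset of the possible mask, the xor is exactly "possible but not yet present", i.e. the logical cpu slots a rescan may still fill. A one-screen illustration with plain bitmasks:

#include <stdio.h>

int main(void)
{
        unsigned long possible = 0x0f;  /* cpus 0-3 possible */
        unsigned long present  = 0x03;  /* cpus 0-1 already present */
        unsigned long avail    = possible ^ present;

        printf("avail mask: %#lx\n", avail);  /* 0x0c: cpus 2 and 3 */
        return 0;
}
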
464static void __init smp_detect_cpus(void) 661static void __init smp_detect_cpus(void)
465{ 662{
466 unsigned int cpu, c_cpus, s_cpus; 663 unsigned int cpu, c_cpus, s_cpus;
467 struct sclp_cpu_info *info; 664 struct sclp_cpu_info *info;
468 u16 boot_cpu_addr, cpu_addr;
469 665
470 c_cpus = 1; 666 info = smp_get_cpu_info();
471 s_cpus = 0;
472 boot_cpu_addr = __cpu_logical_map[0];
473 info = kmalloc(sizeof(*info), GFP_KERNEL);
474 if (!info) 667 if (!info)
475 panic("smp_detect_cpus failed to allocate memory\n"); 668 panic("smp_detect_cpus failed to allocate memory\n");
476#ifdef CONFIG_CRASH_DUMP
477 if (OLDMEM_BASE && !is_kdump_kernel()) {
478 struct save_area *save_area;
479
480 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
481 if (!save_area)
482 panic("could not allocate memory for save area\n");
483 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
484 0x200, 0);
485 zfcpdump_save_areas[0] = save_area;
486 }
487#endif
488 /* Use sigp detection algorithm if sclp doesn't work. */
489 if (sclp_get_cpu_info(info)) {
490 smp_use_sigp_detection = 1;
491 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
492 if (cpu == boot_cpu_addr)
493 continue;
494 if (!raw_cpu_stopped(cpu))
495 continue;
496 smp_get_save_area(c_cpus, cpu);
497 c_cpus++;
498 }
499 goto out;
500 }
501
502 if (info->has_cpu_type) { 669 if (info->has_cpu_type) {
503 for (cpu = 0; cpu < info->combined; cpu++) { 670 for (cpu = 0; cpu < info->combined; cpu++) {
504 if (info->cpu[cpu].address == boot_cpu_addr) { 671 if (info->cpu[cpu].address != boot_cpu_address)
505 smp_cpu_type = info->cpu[cpu].type; 672 continue;
506 break; 673 /* The boot cpu dictates the cpu type. */
507 } 674 boot_cpu_type = info->cpu[cpu].type;
675 break;
508 } 676 }
509 } 677 }
510 678 c_cpus = s_cpus = 0;
511 for (cpu = 0; cpu < info->combined; cpu++) { 679 for (cpu = 0; cpu < info->combined; cpu++) {
512 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 680 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
513 continue; 681 continue;
514 cpu_addr = info->cpu[cpu].address; 682 if (cpu < info->configured) {
515 if (cpu_addr == boot_cpu_addr) 683 smp_get_save_area(c_cpus, info->cpu[cpu].address);
516 continue; 684 c_cpus++;
517 if (!raw_cpu_stopped(cpu_addr)) { 685 } else
518 s_cpus++; 686 s_cpus++;
519 continue;
520 }
521 smp_get_save_area(c_cpus, cpu_addr);
522 c_cpus++;
523 } 687 }
524out:
525 kfree(info);
526 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); 688 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
527 get_online_cpus(); 689 get_online_cpus();
528 __smp_rescan_cpus(); 690 __smp_rescan_cpus(info, 0);
529 put_online_cpus(); 691 put_online_cpus();
692 kfree(info);
530} 693}
531 694
532/* 695/*
533 * Activate a secondary processor. 696 * Activate a secondary processor.
534 */ 697 */
535int __cpuinit start_secondary(void *cpuvoid) 698static void __cpuinit smp_start_secondary(void *cpuvoid)
536{ 699{
700 S390_lowcore.last_update_clock = get_clock();
701 S390_lowcore.restart_stack = (unsigned long) restart_stack;
702 S390_lowcore.restart_fn = (unsigned long) do_restart;
703 S390_lowcore.restart_data = 0;
704 S390_lowcore.restart_source = -1UL;
705 restore_access_regs(S390_lowcore.access_regs_save_area);
706 __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
707 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
537 cpu_init(); 708 cpu_init();
538 preempt_disable(); 709 preempt_disable();
539 init_cpu_timer(); 710 init_cpu_timer();
540 init_cpu_vtimer(); 711 init_cpu_vtimer();
541 pfault_init(); 712 pfault_init();
542
543 notify_cpu_starting(smp_processor_id()); 713 notify_cpu_starting(smp_processor_id());
544 ipi_call_lock(); 714 ipi_call_lock();
545 set_cpu_online(smp_processor_id(), true); 715 set_cpu_online(smp_processor_id(), true);
546 ipi_call_unlock(); 716 ipi_call_unlock();
547 __ctl_clear_bit(0, 28); /* Disable lowcore protection */
548 S390_lowcore.restart_psw.mask =
549 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
550 S390_lowcore.restart_psw.addr =
551 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
552 __ctl_set_bit(0, 28); /* Enable lowcore protection */
553 local_irq_enable(); 717 local_irq_enable();
554 /* cpu_idle will call schedule for us */ 718 /* cpu_idle will call schedule for us */
555 cpu_idle(); 719 cpu_idle();
556 return 0;
557} 720}
558 721
559struct create_idle { 722struct create_idle {
@@ -572,82 +735,20 @@ static void __cpuinit smp_fork_idle(struct work_struct *work)
572 complete(&c_idle->done); 735 complete(&c_idle->done);
573} 736}
574 737
575static int __cpuinit smp_alloc_lowcore(int cpu)
576{
577 unsigned long async_stack, panic_stack;
578 struct _lowcore *lowcore;
579
580 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
581 if (!lowcore)
582 return -ENOMEM;
583 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
584 panic_stack = __get_free_page(GFP_KERNEL);
585 if (!panic_stack || !async_stack)
586 goto out;
587 memcpy(lowcore, &S390_lowcore, 512);
588 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
589 lowcore->async_stack = async_stack + ASYNC_SIZE;
590 lowcore->panic_stack = panic_stack + PAGE_SIZE;
591 lowcore->restart_psw.mask =
592 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
593 lowcore->restart_psw.addr =
594 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
595 if (user_mode != HOME_SPACE_MODE)
596 lowcore->restart_psw.mask |= PSW_ASC_HOME;
597#ifndef CONFIG_64BIT
598 if (MACHINE_HAS_IEEE) {
599 unsigned long save_area;
600
601 save_area = get_zeroed_page(GFP_KERNEL);
602 if (!save_area)
603 goto out;
604 lowcore->extended_save_area_addr = (u32) save_area;
605 }
606#else
607 if (vdso_alloc_per_cpu(cpu, lowcore))
608 goto out;
609#endif
610 lowcore_ptr[cpu] = lowcore;
611 return 0;
612
613out:
614 free_page(panic_stack);
615 free_pages(async_stack, ASYNC_ORDER);
616 free_pages((unsigned long) lowcore, LC_ORDER);
617 return -ENOMEM;
618}
619
620static void smp_free_lowcore(int cpu)
621{
622 struct _lowcore *lowcore;
623
624 lowcore = lowcore_ptr[cpu];
625#ifndef CONFIG_64BIT
626 if (MACHINE_HAS_IEEE)
627 free_page((unsigned long) lowcore->extended_save_area_addr);
628#else
629 vdso_free_per_cpu(cpu, lowcore);
630#endif
631 free_page(lowcore->panic_stack - PAGE_SIZE);
632 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
633 free_pages((unsigned long) lowcore, LC_ORDER);
634 lowcore_ptr[cpu] = NULL;
635}
636
637/* Upping and downing of CPUs */ 738/* Upping and downing of CPUs */
638int __cpuinit __cpu_up(unsigned int cpu) 739int __cpuinit __cpu_up(unsigned int cpu)
639{ 740{
640 struct _lowcore *cpu_lowcore;
641 struct create_idle c_idle; 741 struct create_idle c_idle;
642 struct task_struct *idle; 742 struct pcpu *pcpu;
643 struct stack_frame *sf; 743 int rc;
644 u32 lowcore;
645 int ccode;
646 744
647 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 745 pcpu = pcpu_devices + cpu;
746 if (pcpu->state != CPU_STATE_CONFIGURED)
747 return -EIO;
748 if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
749 sigp_order_code_accepted)
648 return -EIO; 750 return -EIO;
649 idle = current_set[cpu]; 751 if (!pcpu->idle) {
650 if (!idle) {
651 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); 752 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
652 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); 753 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
653 c_idle.cpu = cpu; 754 c_idle.cpu = cpu;
@@ -655,68 +756,28 @@ int __cpuinit __cpu_up(unsigned int cpu)
655 wait_for_completion(&c_idle.done); 756 wait_for_completion(&c_idle.done);
656 if (IS_ERR(c_idle.idle)) 757 if (IS_ERR(c_idle.idle))
657 return PTR_ERR(c_idle.idle); 758 return PTR_ERR(c_idle.idle);
658 idle = c_idle.idle; 759 pcpu->idle = c_idle.idle;
659 current_set[cpu] = c_idle.idle;
660 } 760 }
661 init_idle(idle, cpu); 761 init_idle(pcpu->idle, cpu);
662 if (smp_alloc_lowcore(cpu)) 762 rc = pcpu_alloc_lowcore(pcpu, cpu);
663 return -ENOMEM; 763 if (rc)
664 do { 764 return rc;
665 ccode = sigp(cpu, sigp_initial_cpu_reset); 765 pcpu_prepare_secondary(pcpu, cpu);
666 if (ccode == sigp_busy) 766 pcpu_attach_task(pcpu, pcpu->idle);
667 udelay(10); 767 pcpu_start_fn(pcpu, smp_start_secondary, NULL);
668 if (ccode == sigp_not_operational)
669 goto err_out;
670 } while (ccode == sigp_busy);
671
672 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
673 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
674 udelay(10);
675
676 cpu_lowcore = lowcore_ptr[cpu];
677 cpu_lowcore->kernel_stack = (unsigned long)
678 task_stack_page(idle) + THREAD_SIZE;
679 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
680 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
681 - sizeof(struct pt_regs)
682 - sizeof(struct stack_frame));
683 memset(sf, 0, sizeof(struct stack_frame));
684 sf->gprs[9] = (unsigned long) sf;
685 cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
686 __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
687 atomic_inc(&init_mm.context.attach_count);
688 asm volatile(
689 " stam 0,15,0(%0)"
690 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
691 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
692 cpu_lowcore->current_task = (unsigned long) idle;
693 cpu_lowcore->cpu_nr = cpu;
694 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
695 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
696 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
697 memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
698 MAX_FACILITY_BIT/8);
699 eieio();
700
701 while (sigp(cpu, sigp_restart) == sigp_busy)
702 udelay(10);
703
704 while (!cpu_online(cpu)) 768 while (!cpu_online(cpu))
705 cpu_relax(); 769 cpu_relax();
706 return 0; 770 return 0;
707
708err_out:
709 smp_free_lowcore(cpu);
710 return -EIO;
711} 771}
712 772
713static int __init setup_possible_cpus(char *s) 773static int __init setup_possible_cpus(char *s)
714{ 774{
715 int pcpus, cpu; 775 int max, cpu;
716 776
717 pcpus = simple_strtoul(s, NULL, 0); 777 if (kstrtoint(s, 0, &max) < 0)
778 return 0;
718 init_cpu_possible(cpumask_of(0)); 779 init_cpu_possible(cpumask_of(0));
719 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) 780 for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
720 set_cpu_possible(cpu, true); 781 set_cpu_possible(cpu, true);
721 return 0; 782 return 0;
722} 783}
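For reference, this early parameter is consumed from the boot command line (e.g. "possible_cpus=4"). A stand-alone sketch of the same populate loop; NR_CPU_IDS is an illustrative stand-in for the kernel's nr_cpu_ids:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPU_IDS 8   /* illustrative ceiling, standing in for nr_cpu_ids */

int main(int argc, char **argv)
{
        /* Same shape as the early-param handler: cpu 0 is always
         * possible, cpus 1..max-1 are added up to the ceiling. */
        int cpu, max = argc > 1 ? atoi(argv[1]) : 2;
        unsigned long possible = 1UL << 0;

        for (cpu = 1; cpu < max && cpu < NR_CPU_IDS; cpu++)
                possible |= 1UL << cpu;
        printf("possible mask for max=%d: %#lx\n", max, possible);
        return 0;
}
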
@@ -726,113 +787,79 @@ early_param("possible_cpus", setup_possible_cpus);
726 787
727int __cpu_disable(void) 788int __cpu_disable(void)
728{ 789{
729 struct ec_creg_mask_parms cr_parms; 790 unsigned long cregs[16];
730 int cpu = smp_processor_id();
731
732 set_cpu_online(cpu, false);
733 791
734 /* Disable pfault pseudo page faults on this cpu. */ 792 set_cpu_online(smp_processor_id(), false);
793 /* Disable pseudo page faults on this cpu. */
735 pfault_fini(); 794 pfault_fini();
736 795 /* Disable interrupt sources via control register. */
737 memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); 796 __ctl_store(cregs, 0, 15);
738 memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); 797 cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
739 798 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
740 /* disable all external interrupts */ 799 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
741 cr_parms.orvals[0] = 0; 800 __ctl_load(cregs, 0, 15);
742 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
743 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
744 1 << 4);
745 /* disable all I/O interrupts */
746 cr_parms.orvals[6] = 0;
747 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
748 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
749 /* disable most machine checks */
750 cr_parms.orvals[14] = 0;
751 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
752 1 << 25 | 1 << 24);
753
754 smp_ctl_bit_callback(&cr_parms);
755
756 return 0; 801 return 0;
757} 802}
758 803
759void __cpu_die(unsigned int cpu) 804void __cpu_die(unsigned int cpu)
760{ 805{
806 struct pcpu *pcpu;
807
761 /* Wait until target cpu is down */ 808 /* Wait until target cpu is down */
762 while (!cpu_stopped(cpu)) 809 pcpu = pcpu_devices + cpu;
810 while (!pcpu_stopped(pcpu))
763 cpu_relax(); 811 cpu_relax();
764 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) 812 pcpu_free_lowcore(pcpu);
765 udelay(10);
766 smp_free_lowcore(cpu);
767 atomic_dec(&init_mm.context.attach_count); 813 atomic_dec(&init_mm.context.attach_count);
768} 814}
769 815
770void __noreturn cpu_die(void) 816void __noreturn cpu_die(void)
771{ 817{
772 idle_task_exit(); 818 idle_task_exit();
773 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 819 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
774 cpu_relax(); 820 for (;;) ;
775 for (;;);
776} 821}
777 822
778#endif /* CONFIG_HOTPLUG_CPU */ 823#endif /* CONFIG_HOTPLUG_CPU */
779 824
780void __init smp_prepare_cpus(unsigned int max_cpus) 825static void smp_call_os_info_init_fn(void)
781{ 826{
782#ifndef CONFIG_64BIT 827 int (*init_fn)(void);
783 unsigned long save_area = 0; 828 unsigned long size;
784#endif
785 unsigned long async_stack, panic_stack;
786 struct _lowcore *lowcore;
787 829
788 smp_detect_cpus(); 830 init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size);
831 if (!init_fn)
832 return;
833 init_fn();
834}
789 835
836void __init smp_prepare_cpus(unsigned int max_cpus)
837{
790 /* request the 0x1201 emergency signal external interrupt */ 838 /* request the 0x1201 emergency signal external interrupt */
791 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 839 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
792 panic("Couldn't request external interrupt 0x1201"); 840 panic("Couldn't request external interrupt 0x1201");
793 /* request the 0x1202 external call external interrupt */ 841 /* request the 0x1202 external call external interrupt */
794 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) 842 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
795 panic("Couldn't request external interrupt 0x1202"); 843 panic("Couldn't request external interrupt 0x1202");
796 844 smp_call_os_info_init_fn();
797 /* Reallocate current lowcore, but keep its contents. */ 845 smp_detect_cpus();
798 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
799 panic_stack = __get_free_page(GFP_KERNEL);
800 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
801 BUG_ON(!lowcore || !panic_stack || !async_stack);
802#ifndef CONFIG_64BIT
803 if (MACHINE_HAS_IEEE)
804 save_area = get_zeroed_page(GFP_KERNEL);
805#endif
806 local_irq_disable();
807 local_mcck_disable();
808 lowcore_ptr[smp_processor_id()] = lowcore;
809 *lowcore = S390_lowcore;
810 lowcore->panic_stack = panic_stack + PAGE_SIZE;
811 lowcore->async_stack = async_stack + ASYNC_SIZE;
812#ifndef CONFIG_64BIT
813 if (MACHINE_HAS_IEEE)
814 lowcore->extended_save_area_addr = (u32) save_area;
815#endif
816 set_prefix((u32)(unsigned long) lowcore);
817 local_mcck_enable();
818 local_irq_enable();
819#ifdef CONFIG_64BIT
820 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
821 BUG();
822#endif
823} 846}
824 847
825void __init smp_prepare_boot_cpu(void) 848void __init smp_prepare_boot_cpu(void)
826{ 849{
827 BUG_ON(smp_processor_id() != 0); 850 struct pcpu *pcpu = pcpu_devices;
828 851
829 current_thread_info()->cpu = 0; 852 boot_cpu_address = stap();
830 set_cpu_present(0, true); 853 pcpu->idle = current;
831 set_cpu_online(0, true); 854 pcpu->state = CPU_STATE_CONFIGURED;
855 pcpu->address = boot_cpu_address;
856 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
857 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
858 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
832 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 859 S390_lowcore.percpu_offset = __per_cpu_offset[0];
833 current_set[0] = current;
834 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
835 cpu_set_polarization(0, POLARIZATION_UNKNOWN); 860 cpu_set_polarization(0, POLARIZATION_UNKNOWN);
861 set_cpu_present(0, true);
862 set_cpu_online(0, true);
836} 863}
837 864
838void __init smp_cpus_done(unsigned int max_cpus) 865void __init smp_cpus_done(unsigned int max_cpus)
@@ -842,7 +869,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
842void __init smp_setup_processor_id(void) 869void __init smp_setup_processor_id(void)
843{ 870{
844 S390_lowcore.cpu_nr = 0; 871 S390_lowcore.cpu_nr = 0;
845 __cpu_logical_map[0] = stap();
846} 872}
847 873
848/* 874/*
@@ -858,56 +884,57 @@ int setup_profiling_timer(unsigned int multiplier)
858 884
859#ifdef CONFIG_HOTPLUG_CPU 885#ifdef CONFIG_HOTPLUG_CPU
860static ssize_t cpu_configure_show(struct device *dev, 886static ssize_t cpu_configure_show(struct device *dev,
861 struct device_attribute *attr, char *buf) 887 struct device_attribute *attr, char *buf)
862{ 888{
863 ssize_t count; 889 ssize_t count;
864 890
865 mutex_lock(&smp_cpu_state_mutex); 891 mutex_lock(&smp_cpu_state_mutex);
866 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); 892 count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
867 mutex_unlock(&smp_cpu_state_mutex); 893 mutex_unlock(&smp_cpu_state_mutex);
868 return count; 894 return count;
869} 895}
870 896
871static ssize_t cpu_configure_store(struct device *dev, 897static ssize_t cpu_configure_store(struct device *dev,
872 struct device_attribute *attr, 898 struct device_attribute *attr,
873 const char *buf, size_t count) 899 const char *buf, size_t count)
874{ 900{
875 int cpu = dev->id; 901 struct pcpu *pcpu;
876 int val, rc; 902 int cpu, val, rc;
877 char delim; 903 char delim;
878 904
879 if (sscanf(buf, "%d %c", &val, &delim) != 1) 905 if (sscanf(buf, "%d %c", &val, &delim) != 1)
880 return -EINVAL; 906 return -EINVAL;
881 if (val != 0 && val != 1) 907 if (val != 0 && val != 1)
882 return -EINVAL; 908 return -EINVAL;
883
884 get_online_cpus(); 909 get_online_cpus();
885 mutex_lock(&smp_cpu_state_mutex); 910 mutex_lock(&smp_cpu_state_mutex);
886 rc = -EBUSY; 911 rc = -EBUSY;
887 /* disallow configuration changes of online cpus and cpu 0 */ 912 /* disallow configuration changes of online cpus and cpu 0 */
913 cpu = dev->id;
888 if (cpu_online(cpu) || cpu == 0) 914 if (cpu_online(cpu) || cpu == 0)
889 goto out; 915 goto out;
916 pcpu = pcpu_devices + cpu;
890 rc = 0; 917 rc = 0;
891 switch (val) { 918 switch (val) {
892 case 0: 919 case 0:
893 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { 920 if (pcpu->state != CPU_STATE_CONFIGURED)
894 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); 921 break;
895 if (!rc) { 922 rc = sclp_cpu_deconfigure(pcpu->address);
896 smp_cpu_state[cpu] = CPU_STATE_STANDBY; 923 if (rc)
897 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 924 break;
898 topology_expect_change(); 925 pcpu->state = CPU_STATE_STANDBY;
899 } 926 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
900 } 927 topology_expect_change();
901 break; 928 break;
902 case 1: 929 case 1:
903 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { 930 if (pcpu->state != CPU_STATE_STANDBY)
904 rc = sclp_cpu_configure(__cpu_logical_map[cpu]); 931 break;
905 if (!rc) { 932 rc = sclp_cpu_configure(pcpu->address);
906 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; 933 if (rc)
907 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 934 break;
908 topology_expect_change(); 935 pcpu->state = CPU_STATE_CONFIGURED;
909 } 936 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
910 } 937 topology_expect_change();
911 break; 938 break;
912 default: 939 default:
913 break; 940 break;
@@ -923,7 +950,7 @@ static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
923static ssize_t show_cpu_address(struct device *dev, 950static ssize_t show_cpu_address(struct device *dev,
924 struct device_attribute *attr, char *buf) 951 struct device_attribute *attr, char *buf)
925{ 952{
926 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); 953 return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
927} 954}
928static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); 955static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
929 956
@@ -955,22 +982,16 @@ static DEVICE_ATTR(capability, 0444, show_capability, NULL);
955static ssize_t show_idle_count(struct device *dev, 982static ssize_t show_idle_count(struct device *dev,
956 struct device_attribute *attr, char *buf) 983 struct device_attribute *attr, char *buf)
957{ 984{
958 struct s390_idle_data *idle; 985 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
959 unsigned long long idle_count; 986 unsigned long long idle_count;
960 unsigned int sequence; 987 unsigned int sequence;
961 988
962 idle = &per_cpu(s390_idle, dev->id); 989 do {
963repeat: 990 sequence = ACCESS_ONCE(idle->sequence);
964 sequence = idle->sequence; 991 idle_count = ACCESS_ONCE(idle->idle_count);
965 smp_rmb(); 992 if (ACCESS_ONCE(idle->idle_enter))
966 if (sequence & 1) 993 idle_count++;
967 goto repeat; 994 } while ((sequence & 1) || (idle->sequence != sequence));
968 idle_count = idle->idle_count;
969 if (idle->idle_enter)
970 idle_count++;
971 smp_rmb();
972 if (idle->sequence != sequence)
973 goto repeat;
974 return sprintf(buf, "%llu\n", idle_count); 995 return sprintf(buf, "%llu\n", idle_count);
975} 996}
976static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); 997static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
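The rewritten reader is a hand-rolled sequence-counter loop: retry while the counter is odd (a writer is mid-update) or changed between the loads. A single-threaded sketch of the reader side; note it omits the ACCESS_ONCE() compiler barriers the kernel version depends on:

#include <stdio.h>

struct idle_data {
        unsigned int sequence;
        unsigned long long idle_count;
        unsigned long long idle_enter;
};

static unsigned long long read_idle_count(const struct idle_data *idle)
{
        unsigned long long count;
        unsigned int seq;

        do {
                seq = idle->sequence;
                count = idle->idle_count;
                if (idle->idle_enter)       /* idle period still open */
                        count++;
        } while ((seq & 1) || (idle->sequence != seq));
        return count;
}

int main(void)
{
        struct idle_data idle = { .sequence = 2, .idle_count = 41, .idle_enter = 1 };

        printf("%llu\n", read_idle_count(&idle));   /* prints 42 */
        return 0;
}
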
@@ -978,24 +999,18 @@ static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
978static ssize_t show_idle_time(struct device *dev, 999static ssize_t show_idle_time(struct device *dev,
979 struct device_attribute *attr, char *buf) 1000 struct device_attribute *attr, char *buf)
980{ 1001{
981 struct s390_idle_data *idle; 1002 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
982 unsigned long long now, idle_time, idle_enter; 1003 unsigned long long now, idle_time, idle_enter, idle_exit;
983 unsigned int sequence; 1004 unsigned int sequence;
984 1005
985 idle = &per_cpu(s390_idle, dev->id); 1006 do {
986 now = get_clock(); 1007 now = get_clock();
987repeat: 1008 sequence = ACCESS_ONCE(idle->sequence);
988 sequence = idle->sequence; 1009 idle_time = ACCESS_ONCE(idle->idle_time);
989 smp_rmb(); 1010 idle_enter = ACCESS_ONCE(idle->idle_enter);
990 if (sequence & 1) 1011 idle_exit = ACCESS_ONCE(idle->idle_exit);
991 goto repeat; 1012 } while ((sequence & 1) || (idle->sequence != sequence));
992 idle_time = idle->idle_time; 1013 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
993 idle_enter = idle->idle_enter;
994 if (idle_enter != 0ULL && idle_enter < now)
995 idle_time += now - idle_enter;
996 smp_rmb();
997 if (idle->sequence != sequence)
998 goto repeat;
999 return sprintf(buf, "%llu\n", idle_time >> 12); 1014 return sprintf(buf, "%llu\n", idle_time >> 12);
1000} 1015}
1001static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); 1016static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
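Two details worth unpacking in the new arithmetic: the GNU "?:" picks idle_exit if the cpu already left idle and "now" if the period is still open, and the final ">> 12" converts TOD units back to microseconds. A small sketch of both (with a standard ternary):

#include <stdio.h>

/* TOD units shifted right by 12 are microseconds, the inverse of the
 * "usecs << 12" conversion used for timeouts. */
static unsigned long long tod_to_usecs(unsigned long long tod)
{
        return tod >> 12;
}

static unsigned long long idle_usecs(unsigned long long idle_enter,
                                     unsigned long long idle_exit,
                                     unsigned long long now)
{
        /* If the cpu is still idle, charge the open period up to "now";
         * if it never went idle, report nothing. */
        unsigned long long end = idle_exit ? idle_exit : now;

        return idle_enter ? tod_to_usecs(end - idle_enter) : 0;
}

int main(void)
{
        printf("%llu\n", idle_usecs(1ULL << 20, 0, 5ULL << 20)); /* open period */
        printf("%llu\n", idle_usecs(0, 0, 5ULL << 20));          /* never idle */
        return 0;
}
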
@@ -1015,7 +1030,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
1015 unsigned long action, void *hcpu) 1030 unsigned long action, void *hcpu)
1016{ 1031{
1017 unsigned int cpu = (unsigned int)(long)hcpu; 1032 unsigned int cpu = (unsigned int)(long)hcpu;
1018 struct cpu *c = &per_cpu(cpu_devices, cpu); 1033 struct cpu *c = &pcpu_devices[cpu].cpu;
1019 struct device *s = &c->dev; 1034 struct device *s = &c->dev;
1020 struct s390_idle_data *idle; 1035 struct s390_idle_data *idle;
1021 int err = 0; 1036 int err = 0;
@@ -1041,7 +1056,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
1041 1056
1042static int __devinit smp_add_present_cpu(int cpu) 1057static int __devinit smp_add_present_cpu(int cpu)
1043{ 1058{
1044 struct cpu *c = &per_cpu(cpu_devices, cpu); 1059 struct cpu *c = &pcpu_devices[cpu].cpu;
1045 struct device *s = &c->dev; 1060 struct device *s = &c->dev;
1046 int rc; 1061 int rc;
1047 1062
@@ -1079,29 +1094,21 @@ out:
1079 1094
1080int __ref smp_rescan_cpus(void) 1095int __ref smp_rescan_cpus(void)
1081{ 1096{
1082 cpumask_t newcpus; 1097 struct sclp_cpu_info *info;
1083 int cpu; 1098 int nr;
1084 int rc;
1085 1099
1100 info = smp_get_cpu_info();
1101 if (!info)
1102 return -ENOMEM;
1086 get_online_cpus(); 1103 get_online_cpus();
1087 mutex_lock(&smp_cpu_state_mutex); 1104 mutex_lock(&smp_cpu_state_mutex);
1088 cpumask_copy(&newcpus, cpu_present_mask); 1105 nr = __smp_rescan_cpus(info, 1);
1089 rc = __smp_rescan_cpus();
1090 if (rc)
1091 goto out;
1092 cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
1093 for_each_cpu(cpu, &newcpus) {
1094 rc = smp_add_present_cpu(cpu);
1095 if (rc)
1096 set_cpu_present(cpu, false);
1097 }
1098 rc = 0;
1099out:
1100 mutex_unlock(&smp_cpu_state_mutex); 1106 mutex_unlock(&smp_cpu_state_mutex);
1101 put_online_cpus(); 1107 put_online_cpus();
1102 if (!cpumask_empty(&newcpus)) 1108 kfree(info);
1109 if (nr)
1103 topology_schedule_update(); 1110 topology_schedule_update();
1104 return rc; 1111 return 0;
1105} 1112}
1106 1113
1107static ssize_t __ref rescan_store(struct device *dev, 1114static ssize_t __ref rescan_store(struct device *dev,
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
deleted file mode 100644
index bfe070bc7659..000000000000
--- a/arch/s390/kernel/switch_cpu.S
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * 31-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <linux/linkage.h>
9#include <asm/asm-offsets.h>
10#include <asm/ptrace.h>
11
12# smp_switch_to_cpu switches to destination cpu and executes the passed function
13# Parameter: %r2 - function to call
14# %r3 - function parameter
15#              %r4 - stack pointer
16# %r5 - current cpu
17# %r6 - destination cpu
18
19 .section .text
20ENTRY(smp_switch_to_cpu)
21 stm %r6,%r15,__SF_GPRS(%r15)
22 lr %r1,%r15
23 ahi %r15,-STACK_FRAME_OVERHEAD
24 st %r1,__SF_BACKCHAIN(%r15)
25 basr %r13,0
260: la %r1,.gprregs_addr-0b(%r13)
27 l %r1,0(%r1)
28 stm %r0,%r15,0(%r1)
291: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
30 brc 2,1b /* busy, try again */
312: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
32 brc 2,2b /* busy, try again */
333: j 3b
34
35ENTRY(smp_restart_cpu)
36 basr %r13,0
370: la %r1,.gprregs_addr-0b(%r13)
38 l %r1,0(%r1)
39 lm %r0,%r15,0(%r1)
401: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
41 brc 10,1b /* busy, accepted (status 0), running */
42 tmll %r0,0x40 /* Test if calling CPU is stopped */
43 jz 1b
44 ltr %r4,%r4 /* New stack ? */
45 jz 1f
46 lr %r15,%r4
471: lr %r14,%r2 /* r14: Function to call */
48 lr %r2,%r3 /* r2 : Parameter for function*/
49 basr %r14,%r14 /* Call function */
50
51.gprregs_addr:
52 .long .gprregs
53
54 .section .data,"aw",@progbits
55.gprregs:
56 .rept 16
57 .long 0
58 .endr
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
deleted file mode 100644
index fcc42d799e41..000000000000
--- a/arch/s390/kernel/switch_cpu64.S
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * 64-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <linux/linkage.h>
9#include <asm/asm-offsets.h>
10#include <asm/ptrace.h>
11
12# smp_switch_to_cpu switches to destination cpu and executes the passed function
13# Parameter: %r2 - function to call
14# %r3 - function parameter
15#              %r4 - stack pointer
16# %r5 - current cpu
17# %r6 - destination cpu
18
19 .section .text
20ENTRY(smp_switch_to_cpu)
21 stmg %r6,%r15,__SF_GPRS(%r15)
22 lgr %r1,%r15
23 aghi %r15,-STACK_FRAME_OVERHEAD
24 stg %r1,__SF_BACKCHAIN(%r15)
25 larl %r1,.gprregs
26 stmg %r0,%r15,0(%r1)
271: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
28 brc 2,1b /* busy, try again */
292: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
30 brc 2,2b /* busy, try again */
313: j 3b
32
33ENTRY(smp_restart_cpu)
34 larl %r1,.gprregs
35 lmg %r0,%r15,0(%r1)
361: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
37 brc 10,1b /* busy, accepted (status 0), running */
38 tmll %r0,0x40 /* Test if calling CPU is stopped */
39 jz 1b
40 ltgr %r4,%r4 /* New stack ? */
41 jz 1f
42 lgr %r15,%r4
431: lgr %r14,%r2 /* r14: Function to call */
44 lgr %r2,%r3 /* r2 : Parameter for function*/
45 basr %r14,%r14 /* Call function */
46
47 .section .data,"aw",@progbits
48.gprregs:
49 .rept 16
50 .quad 0
51 .endr
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index acb78cdee896..dd70ef046058 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -42,7 +42,7 @@ ENTRY(swsusp_arch_suspend)
42 lghi %r1,0x1000 42 lghi %r1,0x1000
43 43
44 /* Save CPU address */ 44 /* Save CPU address */
45 stap __LC_CPU_ADDRESS(%r0) 45 stap __LC_EXT_CPU_ADDR(%r0)
46 46
47 /* Store registers */ 47 /* Store registers */
48 mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ 48 mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
@@ -173,15 +173,15 @@ pgm_check_entry:
173 larl %r1,.Lresume_cpu /* Resume CPU address: r2 */ 173 larl %r1,.Lresume_cpu /* Resume CPU address: r2 */
174 stap 0(%r1) 174 stap 0(%r1)
175 llgh %r2,0(%r1) 175 llgh %r2,0(%r1)
176 llgh %r1,__LC_CPU_ADDRESS(%r0) /* Suspend CPU address: r1 */ 176 llgh %r1,__LC_EXT_CPU_ADDR(%r0) /* Suspend CPU address: r1 */
177 cgr %r1,%r2 177 cgr %r1,%r2
178 je restore_registers /* r1 = r2 -> nothing to do */ 178 je restore_registers /* r1 = r2 -> nothing to do */
179 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 179 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
180 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 180 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
1813: 1813:
182 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET 182 sigp %r9,%r1,11 /* sigp initial cpu reset */
183 brc 8,4f /* accepted */ 183 brc 8,4f /* accepted */
184 brc 2,3b /* busy, try again */ 184 brc 2,3b /* busy, try again */
185 185
186 /* Suspend CPU not available -> panic */ 186 /* Suspend CPU not available -> panic */
187 larl %r15,init_thread_union 187 larl %r15,init_thread_union
@@ -196,10 +196,10 @@ pgm_check_entry:
196 lpsw 0(%r3) 196 lpsw 0(%r3)
1974: 1974:
198 /* Switch to suspend CPU */ 198 /* Switch to suspend CPU */
199 sigp %r9,%r1,__SIGP_RESTART /* start suspend CPU */ 199 sigp %r9,%r1,6 /* sigp restart to suspend CPU */
200 brc 2,4b /* busy, try again */ 200 brc 2,4b /* busy, try again */
2015: 2015:
202 sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */ 202 sigp %r9,%r2,5 /* sigp stop to current resume CPU */
203 brc 2,5b /* busy, try again */ 203 brc 2,5b /* busy, try again */
2046: j 6b 2046: j 6b
205 205
@@ -207,7 +207,7 @@ restart_suspend:
207 larl %r1,.Lresume_cpu 207 larl %r1,.Lresume_cpu
208 llgh %r2,0(%r1) 208 llgh %r2,0(%r1)
2097: 2097:
210 sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */ 210 sigp %r9,%r2,1 /* sigp sense, wait for resume CPU */
211 brc 8,7b /* accepted, status 0, still running */ 211 brc 8,7b /* accepted, status 0, still running */
212 brc 2,7b /* busy, try again */ 212 brc 2,7b /* busy, try again */
213 tmll %r9,0x40 /* Test if resume CPU is stopped */ 213 tmll %r9,0x40 /* Test if resume CPU is stopped */
@@ -257,6 +257,9 @@ restore_registers:
257 lghi %r2,0 257 lghi %r2,0
258 brasl %r14,arch_set_page_states 258 brasl %r14,arch_set_page_states
259 259
260 /* Log potential guest relocation */
261 brasl %r14,lgr_info_log
262
260 /* Reinitialize the channel subsystem */ 263 /* Reinitialize the channel subsystem */
261 brasl %r14,channel_subsystem_reinit 264 brasl %r14,channel_subsystem_reinit
262 265
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 14da278febbf..d4e1cb1dbcd1 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -165,7 +165,7 @@ void init_cpu_timer(void)
165 __ctl_set_bit(0, 4); 165 __ctl_set_bit(0, 4);
166} 166}
167 167
168static void clock_comparator_interrupt(unsigned int ext_int_code, 168static void clock_comparator_interrupt(struct ext_code ext_code,
169 unsigned int param32, 169 unsigned int param32,
170 unsigned long param64) 170 unsigned long param64)
171{ 171{
@@ -177,7 +177,7 @@ static void clock_comparator_interrupt(unsigned int ext_int_code,
177static void etr_timing_alert(struct etr_irq_parm *); 177static void etr_timing_alert(struct etr_irq_parm *);
178static void stp_timing_alert(struct stp_irq_parm *); 178static void stp_timing_alert(struct stp_irq_parm *);
179 179
180static void timing_alert_interrupt(unsigned int ext_int_code, 180static void timing_alert_interrupt(struct ext_code ext_code,
181 unsigned int param32, unsigned long param64) 181 unsigned int param32, unsigned long param64)
182{ 182{
183 kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++; 183 kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 7370a41948ca..4f8dc942257c 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -79,12 +79,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
79 cpu < TOPOLOGY_CPU_BITS; 79 cpu < TOPOLOGY_CPU_BITS;
80 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1)) 80 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
81 { 81 {
82 unsigned int rcpu, lcpu; 82 unsigned int rcpu;
83 int lcpu;
83 84
84 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; 85 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
85 for_each_present_cpu(lcpu) { 86 lcpu = smp_find_processor_id(rcpu);
86 if (cpu_logical_map(lcpu) != rcpu) 87 if (lcpu >= 0) {
87 continue;
88 cpumask_set_cpu(lcpu, &book->mask); 88 cpumask_set_cpu(lcpu, &book->mask);
89 cpu_book_id[lcpu] = book->id; 89 cpu_book_id[lcpu] = book->id;
90 cpumask_set_cpu(lcpu, &core->mask); 90 cpumask_set_cpu(lcpu, &core->mask);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 5ce3750b181f..cd6ebe12c481 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -41,6 +41,7 @@
41#include <asm/cpcmd.h> 41#include <asm/cpcmd.h>
42#include <asm/lowcore.h> 42#include <asm/lowcore.h>
43#include <asm/debug.h> 43#include <asm/debug.h>
44#include <asm/ipl.h>
44#include "entry.h" 45#include "entry.h"
45 46
46void (*pgm_check_table[128])(struct pt_regs *regs); 47void (*pgm_check_table[128])(struct pt_regs *regs);
@@ -144,8 +145,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
144 for (i = 0; i < kstack_depth_to_print; i++) { 145 for (i = 0; i < kstack_depth_to_print; i++) {
145 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 146 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
146 break; 147 break;
147 if (i && ((i * sizeof (long) % 32) == 0)) 148 if ((i * sizeof(long) % 32) == 0)
148 printk("\n "); 149 printk("%s ", i == 0 ? "" : "\n");
149 printk(LONG, *stack++); 150 printk(LONG, *stack++);
150 } 151 }
151 printk("\n"); 152 printk("\n");
@@ -239,6 +240,7 @@ void die(struct pt_regs *regs, const char *str)
239 static int die_counter; 240 static int die_counter;
240 241
241 oops_enter(); 242 oops_enter();
243 lgr_info_log();
242 debug_stop_all(); 244 debug_stop_all();
243 console_verbose(); 245 console_verbose();
244 spin_lock_irq(&die_lock); 246 spin_lock_irq(&die_lock);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index d73630b4fe1d..e704a9965f90 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -89,18 +89,11 @@ static void vdso_init_data(struct vdso_data *vd)
89 89
90#ifdef CONFIG_64BIT 90#ifdef CONFIG_64BIT
91/* 91/*
92 * Setup per cpu vdso data page.
93 */
94static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
95{
96}
97
98/*
99 * Allocate/free per cpu vdso data. 92 * Allocate/free per cpu vdso data.
100 */ 93 */
101#define SEGMENT_ORDER 2 94#define SEGMENT_ORDER 2
102 95
103int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) 96int vdso_alloc_per_cpu(struct _lowcore *lowcore)
104{ 97{
105 unsigned long segment_table, page_table, page_frame; 98 unsigned long segment_table, page_table, page_frame;
106 u32 *psal, *aste; 99 u32 *psal, *aste;
@@ -139,7 +132,6 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
139 aste[4] = (u32)(addr_t) psal; 132 aste[4] = (u32)(addr_t) psal;
140 lowcore->vdso_per_cpu_data = page_frame; 133 lowcore->vdso_per_cpu_data = page_frame;
141 134
142 vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
143 return 0; 135 return 0;
144 136
145out: 137out:
@@ -149,7 +141,7 @@ out:
149 return -ENOMEM; 141 return -ENOMEM;
150} 142}
151 143
152void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) 144void vdso_free_per_cpu(struct _lowcore *lowcore)
153{ 145{
154 unsigned long segment_table, page_table, page_frame; 146 unsigned long segment_table, page_table, page_frame;
155 u32 *psal, *aste; 147 u32 *psal, *aste;
@@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
168 free_pages(segment_table, SEGMENT_ORDER); 160 free_pages(segment_table, SEGMENT_ORDER);
169} 161}
170 162
171static void __vdso_init_cr5(void *dummy) 163static void vdso_init_cr5(void)
172{ 164{
173 unsigned long cr5; 165 unsigned long cr5;
174 166
167 if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
168 return;
175 cr5 = offsetof(struct _lowcore, paste); 169 cr5 = offsetof(struct _lowcore, paste);
176 __ctl_load(cr5, 5, 5); 170 __ctl_load(cr5, 5, 5);
177} 171}
178
179static void vdso_init_cr5(void)
180{
181 if (user_mode != HOME_SPACE_MODE && vdso_enabled)
182 on_each_cpu(__vdso_init_cr5, NULL, 1);
183}
184#endif /* CONFIG_64BIT */ 172#endif /* CONFIG_64BIT */
185 173
186/* 174/*
@@ -322,10 +310,8 @@ static int __init vdso_init(void)
322 } 310 }
323 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); 311 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
324 vdso64_pagelist[vdso64_pages] = NULL; 312 vdso64_pagelist[vdso64_pages] = NULL;
325#ifndef CONFIG_SMP 313 if (vdso_alloc_per_cpu(&S390_lowcore))
326 if (vdso_alloc_per_cpu(0, &S390_lowcore))
327 BUG(); 314 BUG();
328#endif
329 vdso_init_cr5(); 315 vdso_init_cr5();
330#endif /* CONFIG_64BIT */ 316#endif /* CONFIG_64BIT */
331 317
@@ -335,7 +321,7 @@ static int __init vdso_init(void)
335 321
336 return 0; 322 return 0;
337} 323}
338arch_initcall(vdso_init); 324early_initcall(vdso_init);
339 325
340int in_gate_area_no_mm(unsigned long addr) 326int in_gate_area_no_mm(unsigned long addr)
341{ 327{
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index bb48977f5469..39ebff506946 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -26,6 +26,7 @@
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
 #include <asm/irq.h>
+#include "entry.h"
 
 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
 
@@ -123,153 +124,53 @@ void account_system_vtime(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
-void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
+void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
-	__u64 idle_time, expires;
+	unsigned long long idle_time;
+	unsigned long psw_mask;
 
-	if (idle->idle_enter == 0ULL)
-		return;
+	trace_hardirqs_on();
+	/* Don't trace preempt off for idle. */
+	stop_critical_timings();
 
-	/* Account time spent with enabled wait psw loaded as idle time. */
-	idle_time = int_clock - idle->idle_enter;
-	account_idle_time(idle_time);
-	S390_lowcore.steal_timer +=
-		idle->idle_enter - S390_lowcore.last_update_clock;
-	S390_lowcore.last_update_clock = int_clock;
-
-	/* Account system time spent going idle. */
-	S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
-	S390_lowcore.last_update_timer = enter_timer;
-
-	/* Restart vtime CPU timer */
-	if (vq->do_spt) {
-		/* Program old expire value but first save progress. */
-		expires = vq->idle - enter_timer;
-		expires += get_vtimer();
-		set_vtimer(expires);
-	} else {
-		/* Don't account the CPU timer delta while the cpu was idle. */
-		vq->elapsed -= vq->idle - enter_timer;
-	}
+	/* Wait for external, I/O or machine check interrupt. */
+	psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
+		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+	idle->nohz_delay = 0;
 
+	/* Call the assembler magic in entry.S */
+	psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+
+	/* Reenable preemption tracer. */
+	start_critical_timings();
+
+	/* Account time spent with enabled wait psw loaded as idle time. */
 	idle->sequence++;
 	smp_wmb();
+	idle_time = idle->idle_exit - idle->idle_enter;
 	idle->idle_time += idle_time;
-	idle->idle_enter = 0ULL;
+	idle->idle_enter = idle->idle_exit = 0ULL;
 	idle->idle_count++;
+	account_idle_time(idle_time);
 	smp_wmb();
 	idle->sequence++;
 }
 
-void __kprobes vtime_stop_cpu(void)
-{
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
-	psw_t psw;
-
-	/* Wait for external, I/O or machine check interrupt. */
-	psw.mask = psw_kernel_bits | PSW_MASK_WAIT |
-		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-
-	idle->nohz_delay = 0;
-
-	/* Check if the CPU timer needs to be reprogrammed. */
-	if (vq->do_spt) {
-		__u64 vmax = VTIMER_MAX_SLICE;
-		/*
-		 * The inline assembly is equivalent to
-		 *	vq->idle = get_cpu_timer();
-		 *	set_cpu_timer(VTIMER_MAX_SLICE);
-		 *	idle->idle_enter = get_clock();
-		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-		 *			   PSW_MASK_DAT | PSW_MASK_IO |
-		 *			   PSW_MASK_EXT | PSW_MASK_MCHECK);
-		 * The difference is that the inline assembly makes sure that
-		 * the last three instruction are stpt, stck and lpsw in that
-		 * order. This is done to increase the precision.
-		 */
-		asm volatile(
-#ifndef CONFIG_64BIT
-			" basr 1,0\n"
-			"0: ahi 1,1f-0b\n"
-			" st 1,4(%2)\n"
-#else /* CONFIG_64BIT */
-			" larl 1,1f\n"
-			" stg 1,8(%2)\n"
-#endif /* CONFIG_64BIT */
-			" stpt 0(%4)\n"
-			" spt 0(%5)\n"
-			" stck 0(%3)\n"
-#ifndef CONFIG_64BIT
-			" lpsw 0(%2)\n"
-#else /* CONFIG_64BIT */
-			" lpswe 0(%2)\n"
-#endif /* CONFIG_64BIT */
-			"1:"
-			: "=m" (idle->idle_enter), "=m" (vq->idle)
-			: "a" (&psw), "a" (&idle->idle_enter),
-			  "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
-			: "memory", "cc", "1");
-	} else {
-		/*
-		 * The inline assembly is equivalent to
-		 *	vq->idle = get_cpu_timer();
-		 *	idle->idle_enter = get_clock();
-		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-		 *			   PSW_MASK_DAT | PSW_MASK_IO |
-		 *			   PSW_MASK_EXT | PSW_MASK_MCHECK);
-		 * The difference is that the inline assembly makes sure that
-		 * the last three instruction are stpt, stck and lpsw in that
-		 * order. This is done to increase the precision.
-		 */
-		asm volatile(
-#ifndef CONFIG_64BIT
-			" basr 1,0\n"
-			"0: ahi 1,1f-0b\n"
-			" st 1,4(%2)\n"
-#else /* CONFIG_64BIT */
-			" larl 1,1f\n"
-			" stg 1,8(%2)\n"
-#endif /* CONFIG_64BIT */
-			" stpt 0(%4)\n"
-			" stck 0(%3)\n"
-#ifndef CONFIG_64BIT
-			" lpsw 0(%2)\n"
-#else /* CONFIG_64BIT */
-			" lpswe 0(%2)\n"
-#endif /* CONFIG_64BIT */
-			"1:"
-			: "=m" (idle->idle_enter), "=m" (vq->idle)
-			: "a" (&psw), "a" (&idle->idle_enter),
-			  "a" (&vq->idle), "m" (psw)
-			: "memory", "cc", "1");
-	}
-}
-
 cputime64_t s390_get_idle_time(int cpu)
 {
-	struct s390_idle_data *idle;
-	unsigned long long now, idle_time, idle_enter;
+	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
+	unsigned long long now, idle_enter, idle_exit;
 	unsigned int sequence;
 
-	idle = &per_cpu(s390_idle, cpu);
-
-	now = get_clock();
-repeat:
-	sequence = idle->sequence;
-	smp_rmb();
-	if (sequence & 1)
-		goto repeat;
-	idle_time = 0;
-	idle_enter = idle->idle_enter;
-	if (idle_enter != 0ULL && idle_enter < now)
-		idle_time = now - idle_enter;
-	smp_rmb();
-	if (idle->sequence != sequence)
-		goto repeat;
-	return idle_time;
+	do {
+		now = get_clock();
+		sequence = ACCESS_ONCE(idle->sequence);
+		idle_enter = ACCESS_ONCE(idle->idle_enter);
+		idle_exit = ACCESS_ONCE(idle->idle_exit);
+	} while ((sequence & 1) || (idle->sequence != sequence));
	return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
 }
 
 /*
@@ -319,7 +220,7 @@ static void do_callbacks(struct list_head *cb_list)
 /*
  * Handler for the virtual CPU timer.
  */
-static void do_cpu_timer_interrupt(unsigned int ext_int_code,
+static void do_cpu_timer_interrupt(struct ext_code ext_code,
 				   unsigned int param32, unsigned long param64)
 {
 	struct vtimer_queue *vq;
@@ -346,7 +247,6 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
 	}
 	spin_unlock(&vq->lock);
 
-	vq->do_spt = list_empty(&cb_list);
 	do_callbacks(&cb_list);
 
 	/* next event is first in list */
@@ -355,8 +255,7 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
 	if (!list_empty(&vq->list)) {
 		event = list_first_entry(&vq->list, struct vtimer_list, entry);
 		next = event->expires;
-	} else
-		vq->do_spt = 0;
+	}
 	spin_unlock(&vq->lock);
 	/*
 	 * To improve precision add the time spent by the
@@ -570,6 +469,9 @@ void init_cpu_vtimer(void)
 
 	/* enable cpu timer interrupts */
 	__ctl_set_bit(0,10);
+
+	/* set initial cpu timer */
+	set_vtimer(0x7fffffffffffffffULL);
 }
 
 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
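
[Review note] The reworked s390_get_idle_time() pairs with the sequence increments in vtime_stop_cpu() to form a hand-rolled seqcount: the writer bumps idle->sequence to an odd value before touching idle_enter/idle_exit and back to even afterwards, and readers retry while the counter is odd or has moved. A minimal standalone sketch of the same read protocol, with hypothetical type and field names mirroring the patch (ACCESS_ONCE is spelled out so the snippet compiles outside the kernel):

#include <assert.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct idle_window {
	unsigned int sequence;          /* odd while an update is in flight */
	unsigned long long idle_enter;  /* 0 when the CPU is not idle */
	unsigned long long idle_exit;   /* 0 while the CPU is still idle */
};

/* Retry until a consistent (even, unchanged) snapshot is observed. */
static unsigned long long read_idle_time(struct idle_window *w,
					 unsigned long long now)
{
	unsigned long long enter, exit;
	unsigned int seq;

	do {
		seq = ACCESS_ONCE(w->sequence);
		enter = ACCESS_ONCE(w->idle_enter);
		exit = ACCESS_ONCE(w->idle_exit);
	} while ((seq & 1) || ACCESS_ONCE(w->sequence) != seq);
	/* "idle_exit ?: now" in the patch: still idle, charge up to now. */
	return enter ? (exit ? exit : now) - enter : 0;
}

int main(void)
{
	struct idle_window w = { 0, 100, 0 };	/* entered idle at 100 */
	assert(read_idle_time(&w, 250) == 150);
	return 0;
}

The kernel version additionally relies on the smp_wmb() pairs in the writer; a production reader would need matching read barriers that ACCESS_ONCE alone does not provide.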
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 278ee009ce65..f0647ce6da21 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -134,7 +134,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		if (rc == -EFAULT)
 			exception = 1;
 
-		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
+		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
 		if (rc == -EFAULT)
 			exception = 1;
 
@@ -156,7 +156,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		if (rc == -EFAULT)
 			exception = 1;
 
-		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->extcall.code);
+		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
 		if (rc == -EFAULT)
 			exception = 1;
 
@@ -202,7 +202,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		if (rc == -EFAULT)
 			exception = 1;
 
-		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
+		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
 		if (rc == -EFAULT)
 			exception = 1;
 
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index db92f044024c..9f1f71e85778 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -13,6 +13,7 @@
 #include <linux/irqflags.h>
 #include <linux/interrupt.h>
 #include <asm/div64.h>
+#include <asm/timer.h>
 
 void __delay(unsigned long loops)
 {
@@ -28,36 +29,33 @@ void __delay(unsigned long loops)
 
 static void __udelay_disabled(unsigned long long usecs)
 {
-	unsigned long mask, cr0, cr0_saved;
-	u64 clock_saved;
-	u64 end;
+	unsigned long cr0, cr6, new;
+	u64 clock_saved, end;
 
-	mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_WAIT |
-		PSW_MASK_EXT | PSW_MASK_MCHECK;
 	end = get_clock() + (usecs << 12);
 	clock_saved = local_tick_disable();
-	__ctl_store(cr0_saved, 0, 0);
-	cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
-	__ctl_load(cr0 , 0, 0);
+	__ctl_store(cr0, 0, 0);
+	__ctl_store(cr6, 6, 6);
+	new = (cr0 & 0xffff00e0) | 0x00000800;
+	__ctl_load(new , 0, 0);
+	new = 0;
+	__ctl_load(new, 6, 6);
 	lockdep_off();
 	do {
 		set_clock_comparator(end);
-		trace_hardirqs_on();
-		__load_psw_mask(mask);
+		vtime_stop_cpu();
 		local_irq_disable();
 	} while (get_clock() < end);
 	lockdep_on();
-	__ctl_load(cr0_saved, 0, 0);
+	__ctl_load(cr0, 0, 0);
+	__ctl_load(cr6, 6, 6);
 	local_tick_enable(clock_saved);
 }
 
 static void __udelay_enabled(unsigned long long usecs)
 {
-	unsigned long mask;
-	u64 clock_saved;
-	u64 end;
+	u64 clock_saved, end;
 
-	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
 	end = get_clock() + (usecs << 12);
 	do {
 		clock_saved = 0;
@@ -65,8 +63,7 @@ static void __udelay_enabled(unsigned long long usecs)
 			clock_saved = local_tick_disable();
 			set_clock_comparator(end);
 		}
-		trace_hardirqs_on();
-		__load_psw_mask(mask);
+		vtime_stop_cpu();
 		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
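
[Review note] Both __udelay paths convert microseconds with "usecs << 12" because bit 51 of the s390 TOD clock increments once per microsecond, so one microsecond corresponds to 2^12 = 4096 TOD units. A tiny standalone illustration of that conversion (get_clock() itself is the STCK-backed kernel primitive and is not reproduced here):

#include <assert.h>

static unsigned long long tod_from_usecs(unsigned long long usecs)
{
	return usecs << 12;	/* 1 us == 4096 TOD clock units */
}

static unsigned long long usecs_from_tod(unsigned long long tod)
{
	return tod >> 12;
}

int main(void)
{
	assert(tod_from_usecs(1) == 4096);
	assert(usecs_from_tod(tod_from_usecs(250)) == 250);
	return 0;
}

The functional change itself is that the delay loops now enter the common idle path (vtime_stop_cpu()) instead of hand-loading an enabled-wait PSW; in the disabled variant CR0 is trimmed so that, of the external-interrupt subclasses, effectively only the clock comparator can bring the CPU out of the wait, and CR6 is cleared so pending I/O cannot wake it early.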
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 91754ffb9203..093eb694d9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/smp.h>
 #include <asm/io.h>
 
 int spin_retry = 1000;
@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_yield(void)
-{
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,0x44");
-}
-
-static inline void _raw_yield_cpu(int cpu)
-{
-	if (MACHINE_HAS_DIAG9C)
-		asm volatile("diag %0,0,0x9c"
-			     : : "d" (cpu_logical_map(cpu)));
-	else
-		_raw_yield();
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
 	if (cpu != 0) {
 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
 		    !smp_vcpu_scheduled(~cpu))
-			_raw_yield_cpu(~cpu);
+			smp_yield_cpu(~cpu);
 	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
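
[Review note] The repeated "~owner" / "~cpu" at these yield sites works because the lock word stores the bitwise complement of the owning CPU number: that way the value 0 can mean "free" even when CPU 0 holds the lock, and complementing again recovers the real CPU id to pass to smp_yield_cpu(). A two-line sketch of the encoding (hypothetical helper names, not kernel API):

#include <assert.h>

static int lock_word_for(int cpu)  { return ~cpu; }	/* never 0 */
static int cpu_from_word(int word) { return ~word; }

int main(void)
{
	int word = lock_word_for(0);	/* CPU 0 takes the lock */
	assert(word != 0);		/* still distinguishable from "free" */
	assert(cpu_from_word(word) == 0);
	return 0;
}

Behaviourally the patch is a pure move: the diag 0x44 (yield) and diag 0x9c (directed yield) wrappers migrate from here into the reworked smp code as smp_yield() and smp_yield_cpu().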
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e8fcd928dc78..b17c42df61c9 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -532,7 +532,7 @@ void pfault_fini(void)
 static DEFINE_SPINLOCK(pfault_lock);
 static LIST_HEAD(pfault_list);
 
-static void pfault_interrupt(unsigned int ext_int_code,
+static void pfault_interrupt(struct ext_code ext_code,
 			     unsigned int param32, unsigned long param64)
 {
 	struct task_struct *tsk;
@@ -545,7 +545,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	 * in the 'cpu address' field associated with the
 	 * external interrupt.
 	 */
-	subcode = ext_int_code >> 16;
+	subcode = ext_code.subcode;
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
 		return;
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 9daee91e6c3f..12bea05a0fc1 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -233,8 +233,8 @@ static inline unsigned long *trailer_entry_ptr(unsigned long v)
 }
 
 /* prototypes for external interrupt handler and worker */
-static void hws_ext_handler(unsigned int ext_int_code,
+static void hws_ext_handler(struct ext_code ext_code,
 			    unsigned int param32, unsigned long param64);
 
 static void worker(struct work_struct *work);
 
@@ -673,7 +673,7 @@ int hwsampler_activate(unsigned int cpu)
 	return rc;
 }
 
-static void hws_ext_handler(unsigned int ext_int_code,
+static void hws_ext_handler(struct ext_code ext_code,
 			    unsigned int param32, unsigned long param64)
 {
 	struct hws_cpu_buffer *cb;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e707979767fb..ab9abb46d01a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -64,7 +64,6 @@ config CRYPTO_DEV_GEODE
 config ZCRYPT
 	tristate "Support for PCI-attached cryptographic adapters"
 	depends on S390
-	select ZCRYPT_MONOLITHIC if ZCRYPT="y"
 	select HW_RANDOM
 	help
 	  Select this option if you want to use a PCI-attached cryptographic
@@ -77,14 +76,6 @@ config ZCRYPT
 	  + Crypto Express3 Coprocessor (CEX3C)
 	  + Crypto Express3 Accelerator (CEX3A)
 
-config ZCRYPT_MONOLITHIC
-	bool "Monolithic zcrypt module"
-	depends on ZCRYPT
-	help
-	  Select this option if you want to have a single module z90crypt,
-	  that contains all parts of the crypto device driver (ap bus,
-	  request router and all the card drivers).
-
 config CRYPTO_SHA1_S390
 	tristate "SHA1 digest algorithm"
 	depends on S390
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 110137e7ec81..f3509120a507 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -640,6 +640,10 @@ void dasd_enable_device(struct dasd_device *device)
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* Now wait for the devices to come up. */
 	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_reload_device(device);
+	if (device->discipline->kick_validate)
+		device->discipline->kick_validate(device);
 }
 
 /*
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 46784b83c5c4..0cea7e98f464 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -229,7 +229,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
 }
 
 /* Handle external interruption. */
-static void dasd_ext_handler(unsigned int ext_int_code,
+static void dasd_ext_handler(struct ext_code ext_code,
 			     unsigned int param32, unsigned long param64)
 {
 	struct dasd_ccw_req *cqr, *next;
@@ -239,7 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
 	addr_t ip;
 	int rc;
 
-	switch (ext_int_code >> 24) {
+	switch (ext_code.subcode >> 8) {
 	case DASD_DIAG_CODE_31BIT:
 		ip = (addr_t) param32;
 		break;
@@ -280,7 +280,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
 	cqr->stopclk = get_clock();
 
 	expires = 0;
-	if ((ext_int_code & 0xff0000) == 0) {
+	if ((ext_code.subcode & 0xff) == 0) {
 		cqr->status = DASD_CQR_SUCCESS;
 		/* Start first request on queue if possible -> fast_io. */
 		if (!list_empty(&device->ccw_queue)) {
@@ -296,7 +296,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
 		cqr->status = DASD_CQR_QUEUED;
 		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
 			      "request %p was %d (%d retries left)", cqr,
-			      (ext_int_code >> 16) & 0xff, cqr->retries);
+			      ext_code.subcode & 0xff, cqr->retries);
 		dasd_diag_erp(device);
 	}
 
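
[Review note] The conversions in this handler are mechanical once the new layout is known: struct ext_code carries the former high halfword of the packed unsigned int in its subcode field (compare fault.c above, where "ext_int_code >> 16" simply becomes "ext_code.subcode"). A standalone check of the equivalences used here, with a hypothetical struct layout for illustration only:

#include <assert.h>

struct ext_code {
	unsigned short subcode;	/* former high halfword of ext_int_code */
	unsigned short code;	/* former low halfword */
};

int main(void)
{
	unsigned int ext_int_code = 0x03070000u;	/* sample packed value */
	struct ext_code ec = {
		.subcode = ext_int_code >> 16,
		.code	 = ext_int_code & 0xffff,
	};

	/* old "ext_int_code >> 24" == new "ext_code.subcode >> 8" */
	assert((ext_int_code >> 24) == (unsigned int)(ec.subcode >> 8));
	/* old "(ext_int_code & 0xff0000) == 0" == new "(subcode & 0xff) == 0" */
	assert(((ext_int_code & 0xff0000) == 0) == ((ec.subcode & 0xff) == 0));
	return 0;
}

The same rewrite pattern recurs in the sclp, kvm_virtio and iucv handlers below.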
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 2617b1ed4709..c21871a4e73d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1564,6 +1564,12 @@ static void dasd_eckd_do_validate_server(struct work_struct *work)
 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
 {
 	dasd_get_device(device);
+	/* exit if device not online or in offline processing */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	    device->state < DASD_STATE_ONLINE) {
+		dasd_put_device(device);
+		return;
+	}
 	/* queue call to do_validate_server to the kernel event daemon. */
 	schedule_work(&device->kick_validate);
 }
@@ -1993,6 +1999,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
 static int dasd_eckd_online_to_ready(struct dasd_device *device)
 {
 	cancel_work_sync(&device->reload_device);
+	cancel_work_sync(&device->kick_validate);
 	return dasd_alias_remove_device(device);
 };
 
@@ -2263,6 +2270,7 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
 	 * and only if not suspended
 	 */
 	if (!device->block && private->lcu &&
+	    device->state == DASD_STATE_ONLINE &&
 	    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
 	    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
 		/*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index eaa7e78186f9..30f29a0020a1 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -393,7 +393,7 @@ __sclp_find_req(u32 sccb)
 /* Handler for external interruption. Perform request post-processing.
  * Prepare read event data request if necessary. Start processing of next
  * request on queue. */
-static void sclp_interrupt_handler(unsigned int ext_int_code,
+static void sclp_interrupt_handler(struct ext_code ext_code,
 				   unsigned int param32, unsigned long param64)
 {
 	struct sclp_req *req;
@@ -818,7 +818,7 @@ EXPORT_SYMBOL(sclp_reactivate);
 
 /* Handler for external interruption used during initialization. Modify
  * request state to done. */
-static void sclp_check_handler(unsigned int ext_int_code,
+static void sclp_check_handler(struct ext_code ext_code,
 			       unsigned int param32, unsigned long param64)
 {
 	u32 finished_sccb;
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 87fc0ac11e67..69df137310bc 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -15,7 +15,6 @@
 #include <linux/reboot.h>
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
-#include <asm/sigp.h>
 #include <asm/smp.h>
 
 #include "sclp.h"
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index fa733ecd3d70..69e6c50d4cfb 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -8,6 +8,7 @@
 #define KMSG_COMPONENT "sclp_sdias"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/completion.h>
 #include <linux/sched.h>
 #include <asm/sclp.h>
 #include <asm/debug.h>
@@ -62,15 +63,29 @@ struct sdias_sccb {
 } __attribute__((packed));
 
 static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_evbuf sdias_evbuf;
 
-static int sclp_req_done;
-static wait_queue_head_t sdias_wq;
+static DECLARE_COMPLETION(evbuf_accepted);
+static DECLARE_COMPLETION(evbuf_done);
 static DEFINE_MUTEX(sdias_mutex);
 
+/*
+ * Called by SCLP base when read event data has been completed (async mode only)
+ */
+static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
+{
+	memcpy(&sdias_evbuf, evbuf,
+	       min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
+	complete(&evbuf_done);
+	TRACE("sclp_sdias_receiver_fn done\n");
+}
+
+/*
+ * Called by SCLP base when sdias event has been accepted
+ */
 static void sdias_callback(struct sclp_req *request, void *data)
 {
-	sclp_req_done = 1;
-	wake_up(&sdias_wq); /* Inform caller, that request is complete */
+	complete(&evbuf_accepted);
 	TRACE("callback done\n");
 }
 
@@ -80,7 +95,6 @@ static int sdias_sclp_send(struct sclp_req *req)
 	int rc;
 
 	for (retries = SDIAS_RETRIES; retries; retries--) {
-		sclp_req_done = 0;
 		TRACE("add request\n");
 		rc = sclp_add_request(req);
 		if (rc) {
@@ -91,16 +105,31 @@ static int sdias_sclp_send(struct sclp_req *req)
 			continue;
 		}
 		/* initiated, wait for completion of service call */
-		wait_event(sdias_wq, (sclp_req_done == 1));
+		wait_for_completion(&evbuf_accepted);
 		if (req->status == SCLP_REQ_FAILED) {
 			TRACE("sclp request failed\n");
-			rc = -EIO;
 			continue;
 		}
+		/* if not accepted, retry */
+		if (!(sccb.evbuf.hdr.flags & 0x80)) {
+			TRACE("sclp request failed: flags=%x\n",
+			      sccb.evbuf.hdr.flags);
+			continue;
+		}
+		/*
+		 * for the sync interface the response is in the initial sccb
+		 */
+		if (!sclp_sdias_register.receiver_fn) {
+			memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+			TRACE("sync request done\n");
+			return 0;
+		}
+		/* otherwise we wait for completion */
+		wait_for_completion(&evbuf_done);
 		TRACE("request done\n");
-		break;
+		return 0;
 	}
-	return rc;
+	return -EIO;
 }
 
 /*
@@ -140,13 +169,12 @@ int sclp_sdias_blk_count(void)
 		goto out;
 	}
 
-	switch (sccb.evbuf.event_status) {
+	switch (sdias_evbuf.event_status) {
 	case 0:
-		rc = sccb.evbuf.blk_cnt;
+		rc = sdias_evbuf.blk_cnt;
 		break;
 	default:
-		pr_err("SCLP error: %x\n",
-		       sccb.evbuf.event_status);
+		pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
 		rc = -EIO;
 		goto out;
 	}
@@ -211,18 +239,18 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
 		goto out;
 	}
 
-	switch (sccb.evbuf.event_status) {
+	switch (sdias_evbuf.event_status) {
 	case EVSTATE_ALL_STORED:
 		TRACE("all stored\n");
 	case EVSTATE_PART_STORED:
-		TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
+		TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
 		break;
 	case EVSTATE_NO_DATA:
 		TRACE("no data\n");
 	default:
 		pr_err("Error from SCLP while copying hsa. "
 		       "Event status = %x\n",
-		       sccb.evbuf.event_status);
+		       sdias_evbuf.event_status);
 		rc = -EIO;
 	}
 out:
@@ -230,19 +258,50 @@ out:
 	return rc;
 }
 
-int __init sclp_sdias_init(void)
+static int __init sclp_sdias_register_check(void)
 {
 	int rc;
 
+	rc = sclp_register(&sclp_sdias_register);
+	if (rc)
+		return rc;
+	if (sclp_sdias_blk_count() == 0) {
+		sclp_unregister(&sclp_sdias_register);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int __init sclp_sdias_init_sync(void)
+{
+	TRACE("Try synchronous mode\n");
+	sclp_sdias_register.receive_mask = 0;
+	sclp_sdias_register.receiver_fn = NULL;
+	return sclp_sdias_register_check();
+}
+
+static int __init sclp_sdias_init_async(void)
+{
+	TRACE("Try asynchronous mode\n");
+	sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
+	sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
+	return sclp_sdias_register_check();
+}
+
+int __init sclp_sdias_init(void)
+{
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return 0;
 	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
 	debug_register_view(sdias_dbf, &debug_sprintf_view);
 	debug_set_level(sdias_dbf, 6);
-	rc = sclp_register(&sclp_sdias_register);
-	if (rc)
-		return rc;
-	init_waitqueue_head(&sdias_wq);
+	if (sclp_sdias_init_sync() == 0)
+		goto out;
+	if (sclp_sdias_init_async() == 0)
+		goto out;
+	TRACE("init failed\n");
+	return -ENODEV;
+out:
 	TRACE("init done\n");
 	return 0;
 }
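
[Review note] The sdias rework above replaces an open-coded done-flag plus wait queue with struct completion, which bundles the "done" state and the wakeup into one object. A minimal kernel-style sketch of the pattern (hypothetical names, not this driver):

#include <linux/completion.h>

static DECLARE_COMPLETION(request_done);

/* Runs in interrupt context when the answer arrives. */
static void request_callback(void)
{
	complete(&request_done);
}

/* Submit the request, then sleep until the callback fires. */
static int submit_and_wait(void)
{
	/* ... start the request here ... */
	wait_for_completion(&request_done);
	return 0;
}

Because completions count, a complete() issued before the waiter reaches wait_for_completion() is not lost, so the ordering between the SCLP callback and the sleeping caller is harmless; the driver needs two of them (evbuf_accepted, evbuf_done) because the new async mode signals acceptance and data delivery as separate events.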
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1b6d9247fdc7..3303d66b2794 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -21,7 +21,6 @@
 #include <asm/ipl.h>
 #include <asm/sclp.h>
 #include <asm/setup.h>
-#include <asm/sigp.h>
 #include <asm/uaccess.h>
 #include <asm/debug.h>
 #include <asm/processor.h>
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index dc67c397449e..a49c46c91983 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -601,8 +601,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	struct pt_regs *old_regs;
 
 	old_regs = set_irq_regs(regs);
-	s390_idle_check(regs, S390_lowcore.int_clock,
-			S390_lowcore.async_enter_timer);
 	irq_enter();
 	__this_cpu_write(s390_idle.nohz_delay, 1);
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 770a740a393c..2a0dfcb0bc42 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -18,6 +18,7 @@
 #include <linux/atomic.h>
 #include <asm/debug.h>
 #include <asm/qdio.h>
+#include <asm/ipl.h>
 
 #include "cio.h"
 #include "css.h"
@@ -1093,6 +1094,11 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
 		q->nr, q->first_to_kick, count, irq_ptr->int_parm);
 no_handler:
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+	/*
+	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
+	 * Therefore we call the LGR detection function here.
+	 */
+	lgr_info_log();
 }
 
 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index f0a12d2eb780..af3c7f16ea88 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,16 +2,6 @@
 # S/390 crypto devices
 #
 
-ifdef CONFIG_ZCRYPT_MONOLITHIC
-
-z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \
-		zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o
-obj-$(CONFIG_ZCRYPT) += z90crypt.o
-
-else
-
 ap-objs := ap_bus.o
 obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
-
-endif
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 96bbe9d12a79..12ae1817b172 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1862,7 +1862,5 @@ void ap_module_exit(void)
 	}
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(ap_module_init);
 module_exit(ap_module_exit);
-#endif
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 88ad33ed5d38..88523208d47d 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1220,7 +1220,5 @@ void zcrypt_api_exit(void)
 	misc_deregister(&zcrypt_misc_device);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_api_init);
 module_exit(zcrypt_api_exit);
-#endif
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index da171b5f3996..084286728166 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -63,13 +63,11 @@ static struct ap_device_id zcrypt_cex2a_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
 static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
@@ -496,7 +494,5 @@ void __exit zcrypt_cex2a_exit(void)
 	ap_driver_unregister(&zcrypt_cex2a_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_cex2a_init);
 module_exit(zcrypt_cex2a_exit);
-#endif
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
deleted file mode 100644
index eb313c3fb2d1..000000000000
--- a/drivers/s390/crypto/zcrypt_mono.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- *  linux/drivers/s390/crypto/zcrypt_mono.c
- *
- *  zcrypt 2.1.0
- *
- *  Copyright (C) 2001, 2006 IBM Corporation
- *  Author(s): Robert Burroughs
- *	       Eric Rossman (edrossma@us.ibm.com)
- *
- *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/compat.h>
-#include <linux/atomic.h>
-#include <asm/uaccess.h>
-
-#include "ap_bus.h"
-#include "zcrypt_api.h"
-#include "zcrypt_pcica.h"
-#include "zcrypt_pcicc.h"
-#include "zcrypt_pcixcc.h"
-#include "zcrypt_cex2a.h"
-
-/**
- * The module initialization code.
- */
-static int __init zcrypt_init(void)
-{
-	int rc;
-
-	rc = ap_module_init();
-	if (rc)
-		goto out;
-	rc = zcrypt_api_init();
-	if (rc)
-		goto out_ap;
-	rc = zcrypt_pcica_init();
-	if (rc)
-		goto out_api;
-	rc = zcrypt_pcicc_init();
-	if (rc)
-		goto out_pcica;
-	rc = zcrypt_pcixcc_init();
-	if (rc)
-		goto out_pcicc;
-	rc = zcrypt_cex2a_init();
-	if (rc)
-		goto out_pcixcc;
-	return 0;
-
-out_pcixcc:
-	zcrypt_pcixcc_exit();
-out_pcicc:
-	zcrypt_pcicc_exit();
-out_pcica:
-	zcrypt_pcica_exit();
-out_api:
-	zcrypt_api_exit();
-out_ap:
-	ap_module_exit();
-out:
-	return rc;
-}
-
-/**
- * The module termination code.
- */
-static void __exit zcrypt_exit(void)
-{
-	zcrypt_cex2a_exit();
-	zcrypt_pcixcc_exit();
-	zcrypt_pcicc_exit();
-	zcrypt_pcica_exit();
-	zcrypt_api_exit();
-	ap_module_exit();
-}
-
-module_init(zcrypt_init);
-module_exit(zcrypt_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index d84816f144df..0effca925451 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -53,13 +53,11 @@ static struct ap_device_id zcrypt_pcica_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_pcica_probe(struct ap_device *ap_dev);
 static void zcrypt_pcica_remove(struct ap_device *ap_dev);
@@ -408,7 +406,5 @@ void zcrypt_pcica_exit(void)
 	ap_driver_unregister(&zcrypt_pcica_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_pcica_init);
 module_exit(zcrypt_pcica_exit);
-#endif
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index bdbdbe192993..f9523c0cc8d2 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -65,13 +65,11 @@ static struct ap_device_id zcrypt_pcicc_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
 static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
@@ -614,7 +612,5 @@ void zcrypt_pcicc_exit(void)
 	ap_driver_unregister(&zcrypt_pcicc_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_pcicc_init);
 module_exit(zcrypt_pcicc_exit);
-#endif
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 077b7d109fde..cf1cbd4747f4 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -75,13 +75,11 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
 static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
@@ -1121,7 +1119,5 @@ void zcrypt_pcixcc_exit(void)
 	ap_driver_unregister(&zcrypt_pcixcc_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_pcixcc_init);
 module_exit(zcrypt_pcixcc_exit);
-#endif
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 7bc1955337ea..d74e9ae6dfb3 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -380,15 +380,13 @@ static void hotplug_devices(struct work_struct *dummy)
 /*
  * we emulate the request_irq behaviour on top of s390 extints
  */
-static void kvm_extint_handler(unsigned int ext_int_code,
+static void kvm_extint_handler(struct ext_code ext_code,
 			       unsigned int param32, unsigned long param64)
 {
 	struct virtqueue *vq;
-	u16 subcode;
 	u32 param;
 
-	subcode = ext_int_code >> 16;
-	if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
+	if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
 		return;
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
 
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 403be43b793d..3ad1f9db5f8b 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1800,7 +1800,7 @@ static void iucv_work_fn(struct work_struct *work)
  * Handles external interrupts coming in from CP.
  * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
  */
-static void iucv_external_interrupt(unsigned int ext_int_code,
+static void iucv_external_interrupt(struct ext_code ext_code,
 				    unsigned int param32, unsigned long param64)
 {
 	struct iucv_irq_data *p;