author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2012-03-11 11:59:26 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2012-03-11 11:59:28 -0400
commit	8b646bd759086f6090fe27acf414c0b5faa737f4 (patch)
tree	29475659031c57ccf2ca43899614ab5c6b1899a0
parent	7e180bd8020d213bb0de15c3606968f8a9262439 (diff)
[S390] rework smp code
Define struct pcpu and merge some of the NR_CPUS arrays into it, including __cpu_logical_map, current_set and smp_cpu_state. Split the smp related functions into those operating on physical cpus and those operating on a logical cpu number, and make the functions for physical cpus take a pointer to a struct pcpu. This confines the knowledge about cpu addresses to smp.c, entry[64].S and swsusp_asm64.S, so the sigp.h header can be removed.

The PSW restart mechanism is used to start secondary cpus, to call a function on an online cpu, to call a function on the ipl cpu, and for the nmi signal. Replace the different assembler functions with a single entry point, restart_int_handler. The new entry point calls a function whose pointer is stored in the lowcore of the target cpu, and it can wait for the source cpu to stop. This covers all existing use cases.

Overall the code is now simpler, with ~380 fewer lines.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
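For orientation before reading the diff: the delegation protocol implemented by pcpu_delegate() and the rewritten restart_int_handler boils down to the sketch below. This is an illustrative C rendition, not kernel source; the restart_stack/restart_fn/restart_data/restart_source fields and the SIGP order numbers mirror the patch, while sigp(), stap() and the CC_* names are hypothetical stand-ins for the inline assembly, and the flow is simplified (the real pcpu_delegate() also sigp-stops the target before rewriting its lowcore).

/*
 * Illustrative sketch only, not kernel source.  sigp() and stap() are
 * hypothetical wrappers for the instructions of the same name.
 */
#include <stdint.h>

enum { SIGP_SENSE = 1, SIGP_STOP = 5, SIGP_RESTART = 6 };    /* orders */
enum { CC_ACCEPTED = 0, CC_STATUS_STORED = 1, CC_BUSY = 2 }; /* cond codes */

int sigp(uint16_t addr, int order);    /* signal processor, returns cc */
uint16_t stap(void);                   /* own physical cpu address */

struct restart_block {                 /* lives in the target's lowcore */
	unsigned long restart_stack;   /* stack for restart_int_handler */
	unsigned long restart_fn;      /* function to call */
	unsigned long restart_data;    /* its argument */
	unsigned long restart_source;  /* requester's cpu address, or -1UL */
};

/* Requesting cpu, cf. pcpu_delegate(): hand func off, then stop. */
void delegate(struct restart_block *lc, uint16_t target,
	      void (*func)(void *), void *data, unsigned long stack)
{
	lc->restart_stack = stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = stap();
	while (sigp(target, SIGP_RESTART) == CC_BUSY)
		;                      /* kick the target cpu */
	while (sigp(stap(), SIGP_STOP) == CC_BUSY)
		;                      /* stop ourselves */
	for (;;) ;
}

/* Target cpu, cf. the new restart_int_handler in entry[64].S. */
void restart_handler(struct restart_block *lc)
{
	void (*fn)(void *) = (void (*)(void *)) lc->restart_fn;
	int cc;

	if ((long) lc->restart_source >= 0) {
		/* Wait until the requester is stopped (or not operational). */
		do
			cc = sigp((uint16_t) lc->restart_source, SIGP_SENSE);
		while (cc == CC_ACCEPTED || cc == CC_BUSY);
	}
	fn((void *) lc->restart_data);
	while (sigp(stap(), SIGP_STOP) == CC_BUSY)
		;                      /* if fn returns, stop this cpu too */
	for (;;) ;
}

The restart_source test is what lets smp_call_online_cpu() guarantee that the requesting cpu has actually stopped before the delegated function runs, while pcpu_start_fn() and setup_lowcore() store -1UL to skip the wait when starting a secondary cpu or taking a PSW restart from an offline cpu.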
-rw-r--r--  arch/s390/include/asm/lowcore.h   | 100
-rw-r--r--  arch/s390/include/asm/sigp.h      | 132
-rw-r--r--  arch/s390/include/asm/smp.h       | 63
-rw-r--r--  arch/s390/include/asm/vdso.h      | 4
-rw-r--r--  arch/s390/kernel/Makefile         | 2
-rw-r--r--  arch/s390/kernel/asm-offsets.c    | 17
-rw-r--r--  arch/s390/kernel/entry.S          | 81
-rw-r--r--  arch/s390/kernel/entry.h          | 9
-rw-r--r--  arch/s390/kernel/entry64.S        | 72
-rw-r--r--  arch/s390/kernel/ipl.c            | 21
-rw-r--r--  arch/s390/kernel/machine_kexec.c  | 43
-rw-r--r--  arch/s390/kernel/setup.c          | 56
-rw-r--r--  arch/s390/kernel/smp.c            | 1083
-rw-r--r--  arch/s390/kernel/switch_cpu.S     | 58
-rw-r--r--  arch/s390/kernel/switch_cpu64.S   | 51
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S   | 12
-rw-r--r--  arch/s390/kernel/topology.c       | 8
-rw-r--r--  arch/s390/kernel/vdso.c           | 28
-rw-r--r--  arch/s390/kernel/vtime.c          | 3
-rw-r--r--  arch/s390/lib/spinlock.c          | 30
-rw-r--r--  drivers/s390/char/sclp_quiesce.c  | 1
-rw-r--r--  drivers/s390/char/zcore.c         | 1
22 files changed, 739 insertions(+), 1136 deletions(-)
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 0831449e87a..4e69563bc95 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999,2010 2 * Copyright IBM Corp. 1999,2012
3 * Author(s): Hartmut Penner <hp@de.ibm.com>, 3 * Author(s): Hartmut Penner <hp@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 * Denis Joseph Barrow, 5 * Denis Joseph Barrow,
@@ -12,14 +12,6 @@
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/cpu.h> 13#include <asm/cpu.h>
14 14
15void restart_int_handler(void);
16void ext_int_handler(void);
17void system_call(void);
18void pgm_check_handler(void);
19void mcck_int_handler(void);
20void io_int_handler(void);
21void psw_restart_int_handler(void);
22
23#ifdef CONFIG_32BIT 15#ifdef CONFIG_32BIT
24 16
25#define LC_ORDER 0 17#define LC_ORDER 0
@@ -117,32 +109,37 @@ struct _lowcore {
117 __u64 steal_timer; /* 0x0288 */ 109 __u64 steal_timer; /* 0x0288 */
118 __u64 last_update_timer; /* 0x0290 */ 110 __u64 last_update_timer; /* 0x0290 */
119 __u64 last_update_clock; /* 0x0298 */ 111 __u64 last_update_clock; /* 0x0298 */
112 __u64 int_clock; /* 0x02a0 */
113 __u64 mcck_clock; /* 0x02a8 */
114 __u64 clock_comparator; /* 0x02b0 */
120 115
121 /* Current process. */ 116 /* Current process. */
122 __u32 current_task; /* 0x02a0 */ 117 __u32 current_task; /* 0x02b8 */
123 __u32 thread_info; /* 0x02a4 */ 118 __u32 thread_info; /* 0x02bc */
124 __u32 kernel_stack; /* 0x02a8 */ 119 __u32 kernel_stack; /* 0x02c0 */
120
121 /* Interrupt, panic and restart stack. */
122 __u32 async_stack; /* 0x02c4 */
123 __u32 panic_stack; /* 0x02c8 */
124 __u32 restart_stack; /* 0x02cc */
125 125
126 /* Interrupt and panic stack. */ 126 /* Restart function and parameter. */
127 __u32 async_stack; /* 0x02ac */ 127 __u32 restart_fn; /* 0x02d0 */
128 __u32 panic_stack; /* 0x02b0 */ 128 __u32 restart_data; /* 0x02d4 */
129 __u32 restart_source; /* 0x02d8 */
129 130
130 /* Address space pointer. */ 131 /* Address space pointer. */
131 __u32 kernel_asce; /* 0x02b4 */ 132 __u32 kernel_asce; /* 0x02dc */
132 __u32 user_asce; /* 0x02b8 */ 133 __u32 user_asce; /* 0x02e0 */
133 __u32 current_pid; /* 0x02bc */ 134 __u32 current_pid; /* 0x02e4 */
134 135
135 /* SMP info area */ 136 /* SMP info area */
136 __u32 cpu_nr; /* 0x02c0 */ 137 __u32 cpu_nr; /* 0x02e8 */
137 __u32 softirq_pending; /* 0x02c4 */ 138 __u32 softirq_pending; /* 0x02ec */
138 __u32 percpu_offset; /* 0x02c8 */ 139 __u32 percpu_offset; /* 0x02f0 */
139 __u32 ext_call_fast; /* 0x02cc */ 140 __u32 machine_flags; /* 0x02f4 */
140 __u64 int_clock; /* 0x02d0 */ 141 __u32 ftrace_func; /* 0x02f8 */
141 __u64 mcck_clock; /* 0x02d8 */ 142 __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
142 __u64 clock_comparator; /* 0x02e0 */
143 __u32 machine_flags; /* 0x02e8 */
144 __u32 ftrace_func; /* 0x02ec */
145 __u8 pad_0x02f8[0x0300-0x02f0]; /* 0x02f0 */
146 143
147 /* Interrupt response block */ 144 /* Interrupt response block */
148 __u8 irb[64]; /* 0x0300 */ 145 __u8 irb[64]; /* 0x0300 */
@@ -254,34 +251,39 @@ struct _lowcore {
254 __u64 steal_timer; /* 0x02e0 */ 251 __u64 steal_timer; /* 0x02e0 */
255 __u64 last_update_timer; /* 0x02e8 */ 252 __u64 last_update_timer; /* 0x02e8 */
256 __u64 last_update_clock; /* 0x02f0 */ 253 __u64 last_update_clock; /* 0x02f0 */
254 __u64 int_clock; /* 0x02f8 */
255 __u64 mcck_clock; /* 0x0300 */
256 __u64 clock_comparator; /* 0x0308 */
257 257
258 /* Current process. */ 258 /* Current process. */
259 __u64 current_task; /* 0x02f8 */ 259 __u64 current_task; /* 0x0310 */
260 __u64 thread_info; /* 0x0300 */ 260 __u64 thread_info; /* 0x0318 */
261 __u64 kernel_stack; /* 0x0308 */ 261 __u64 kernel_stack; /* 0x0320 */
262
263 /* Interrupt, panic and restart stack. */
264 __u64 async_stack; /* 0x0328 */
265 __u64 panic_stack; /* 0x0330 */
266 __u64 restart_stack; /* 0x0338 */
262 267
263 /* Interrupt and panic stack. */ 268 /* Restart function and parameter. */
264 __u64 async_stack; /* 0x0310 */ 269 __u64 restart_fn; /* 0x0340 */
265 __u64 panic_stack; /* 0x0318 */ 270 __u64 restart_data; /* 0x0348 */
271 __u64 restart_source; /* 0x0350 */
266 272
267 /* Address space pointer. */ 273 /* Address space pointer. */
268 __u64 kernel_asce; /* 0x0320 */ 274 __u64 kernel_asce; /* 0x0358 */
269 __u64 user_asce; /* 0x0328 */ 275 __u64 user_asce; /* 0x0360 */
270 __u64 current_pid; /* 0x0330 */ 276 __u64 current_pid; /* 0x0368 */
271 277
272 /* SMP info area */ 278 /* SMP info area */
273 __u32 cpu_nr; /* 0x0338 */ 279 __u32 cpu_nr; /* 0x0370 */
274 __u32 softirq_pending; /* 0x033c */ 280 __u32 softirq_pending; /* 0x0374 */
275 __u64 percpu_offset; /* 0x0340 */ 281 __u64 percpu_offset; /* 0x0378 */
276 __u64 ext_call_fast; /* 0x0348 */ 282 __u64 vdso_per_cpu_data; /* 0x0380 */
277 __u64 int_clock; /* 0x0350 */ 283 __u64 machine_flags; /* 0x0388 */
278 __u64 mcck_clock; /* 0x0358 */ 284 __u64 ftrace_func; /* 0x0390 */
279 __u64 clock_comparator; /* 0x0360 */ 285 __u64 gmap; /* 0x0398 */
280 __u64 vdso_per_cpu_data; /* 0x0368 */ 286 __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
281 __u64 machine_flags; /* 0x0370 */
282 __u64 ftrace_func; /* 0x0378 */
283 __u64 gmap; /* 0x0380 */
284 __u8 pad_0x0388[0x0400-0x0388]; /* 0x0388 */
285 287
286 /* Interrupt response block. */ 288 /* Interrupt response block. */
287 __u8 irb[64]; /* 0x0400 */ 289 __u8 irb[64]; /* 0x0400 */
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
deleted file mode 100644
index 7040b8567cd..00000000000
--- a/arch/s390/include/asm/sigp.h
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * Routines and structures for signalling other processors.
3 *
4 * Copyright IBM Corp. 1999,2010
5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 */
9
10#ifndef __ASM_SIGP_H
11#define __ASM_SIGP_H
12
13#include <asm/system.h>
14
15/* Get real cpu address from logical cpu number. */
16extern unsigned short __cpu_logical_map[];
17
18static inline int cpu_logical_map(int cpu)
19{
20#ifdef CONFIG_SMP
21 return __cpu_logical_map[cpu];
22#else
23 return stap();
24#endif
25}
26
27enum {
28 sigp_sense = 1,
29 sigp_external_call = 2,
30 sigp_emergency_signal = 3,
31 sigp_start = 4,
32 sigp_stop = 5,
33 sigp_restart = 6,
34 sigp_stop_and_store_status = 9,
35 sigp_initial_cpu_reset = 11,
36 sigp_cpu_reset = 12,
37 sigp_set_prefix = 13,
38 sigp_store_status_at_address = 14,
39 sigp_store_extended_status_at_address = 15,
40 sigp_set_architecture = 18,
41 sigp_conditional_emergency_signal = 19,
42 sigp_sense_running = 21,
43};
44
45enum {
46 sigp_order_code_accepted = 0,
47 sigp_status_stored = 1,
48 sigp_busy = 2,
49 sigp_not_operational = 3,
50};
51
52/*
53 * Definitions for external call.
54 */
55enum {
56 ec_schedule = 0,
57 ec_call_function,
58 ec_call_function_single,
59 ec_stop_cpu,
60};
61
62/*
63 * Signal processor.
64 */
65static inline int raw_sigp(u16 cpu, int order)
66{
67 register unsigned long reg1 asm ("1") = 0;
68 int ccode;
69
70 asm volatile(
71 " sigp %1,%2,0(%3)\n"
72 " ipm %0\n"
73 " srl %0,28\n"
74 : "=d" (ccode)
75 : "d" (reg1), "d" (cpu),
76 "a" (order) : "cc" , "memory");
77 return ccode;
78}
79
80/*
81 * Signal processor with parameter.
82 */
83static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
84{
85 register unsigned int reg1 asm ("1") = parameter;
86 int ccode;
87
88 asm volatile(
89 " sigp %1,%2,0(%3)\n"
90 " ipm %0\n"
91 " srl %0,28\n"
92 : "=d" (ccode)
93 : "d" (reg1), "d" (cpu),
94 "a" (order) : "cc" , "memory");
95 return ccode;
96}
97
98/*
99 * Signal processor with parameter and return status.
100 */
101static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
102{
103 register unsigned int reg1 asm ("1") = parm;
104 int ccode;
105
106 asm volatile(
107 " sigp %1,%2,0(%3)\n"
108 " ipm %0\n"
109 " srl %0,28\n"
110 : "=d" (ccode), "+d" (reg1)
111 : "d" (cpu), "a" (order)
112 : "cc" , "memory");
113 *status = reg1;
114 return ccode;
115}
116
117static inline int sigp(int cpu, int order)
118{
119 return raw_sigp(cpu_logical_map(cpu), order);
120}
121
122static inline int sigp_p(u32 parameter, int cpu, int order)
123{
124 return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
125}
126
127static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
128{
129 return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
130}
131
132#endif /* __ASM_SIGP_H */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c32e9123b40..797f7872968 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999,2009 2 * Copyright IBM Corp. 1999,2012
3 * Author(s): Denis Joseph Barrow, 3 * Author(s): Denis Joseph Barrow,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 * Heiko Carstens <heiko.carstens@de.ibm.com>, 5 * Heiko Carstens <heiko.carstens@de.ibm.com>,
@@ -10,71 +10,52 @@
10#ifdef CONFIG_SMP 10#ifdef CONFIG_SMP
11 11
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/sigp.h>
14
15extern void machine_restart_smp(char *);
16extern void machine_halt_smp(void);
17extern void machine_power_off_smp(void);
18 13
19#define raw_smp_processor_id() (S390_lowcore.cpu_nr) 14#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
20 15
21extern int __cpu_disable (void);
22extern void __cpu_die (unsigned int cpu);
23extern int __cpu_up (unsigned int cpu);
24
25extern struct mutex smp_cpu_state_mutex; 16extern struct mutex smp_cpu_state_mutex;
17extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
18
19extern int __cpu_up(unsigned int cpu);
26 20
27extern void arch_send_call_function_single_ipi(int cpu); 21extern void arch_send_call_function_single_ipi(int cpu);
28extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 22extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
29 23
30extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 24extern void smp_call_online_cpu(void (*func)(void *), void *);
31 25extern void smp_call_ipl_cpu(void (*func)(void *), void *);
32extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
33extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
34 int from, int to);
35extern void smp_restart_with_online_cpu(void);
36extern void smp_restart_cpu(void);
37 26
38/* 27extern int smp_find_processor_id(u16 address);
39 * returns 1 if (virtual) cpu is scheduled 28extern int smp_store_status(int cpu);
40 * returns 0 otherwise 29extern int smp_vcpu_scheduled(int cpu);
41 */ 30extern void smp_yield_cpu(int cpu);
42static inline int smp_vcpu_scheduled(int cpu) 31extern void smp_yield(void);
43{ 32extern void smp_stop_cpu(void);
44 u32 status;
45
46 switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
47 case sigp_status_stored:
48 /* Check for running status */
49 if (status & 0x400)
50 return 0;
51 break;
52 case sigp_not_operational:
53 return 0;
54 default:
55 break;
56 }
57 return 1;
58}
59 33
60#else /* CONFIG_SMP */ 34#else /* CONFIG_SMP */
61 35
62static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) 36static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
63{ 37{
64 func(data); 38 func(data);
65} 39}
66 40
67static inline void smp_restart_with_online_cpu(void) 41static inline void smp_call_online_cpu(void (*func)(void *), void *data)
68{ 42{
43 func(data);
69} 44}
70 45
71#define smp_vcpu_scheduled (1) 46static inline int smp_find_processor_id(int address) { return 0; }
47static inline int smp_vcpu_scheduled(int cpu) { return 1; }
48static inline void smp_yield_cpu(int cpu) { }
49static inline void smp_yield(void) { }
50static inline void smp_stop_cpu(void) { }
72 51
73#endif /* CONFIG_SMP */ 52#endif /* CONFIG_SMP */
74 53
75#ifdef CONFIG_HOTPLUG_CPU 54#ifdef CONFIG_HOTPLUG_CPU
76extern int smp_rescan_cpus(void); 55extern int smp_rescan_cpus(void);
77extern void __noreturn cpu_die(void); 56extern void __noreturn cpu_die(void);
57extern void __cpu_die(unsigned int cpu);
58extern int __cpu_disable(void);
78#else 59#else
79static inline int smp_rescan_cpus(void) { return 0; } 60static inline int smp_rescan_cpus(void) { return 0; }
80static inline void cpu_die(void) { } 61static inline void cpu_die(void) { }
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 533f35751ae..c4a11cfad3c 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -40,8 +40,8 @@ struct vdso_per_cpu_data {
40extern struct vdso_data *vdso_data; 40extern struct vdso_data *vdso_data;
41 41
42#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
43int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore); 43int vdso_alloc_per_cpu(struct _lowcore *lowcore);
44void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore); 44void vdso_free_per_cpu(struct _lowcore *lowcore);
45#endif 45#endif
46 46
47#endif /* __ASSEMBLY__ */ 47#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7d9ec924e7e..d0a48268eb2 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -34,8 +34,6 @@ extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
34obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 34obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
35obj-$(CONFIG_SMP) += smp.o 35obj-$(CONFIG_SMP) += smp.o
36obj-$(CONFIG_SCHED_BOOK) += topology.o 36obj-$(CONFIG_SCHED_BOOK) += topology.o
37obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
38 switch_cpu.o)
39obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o 37obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
40obj-$(CONFIG_AUDIT) += audit.o 38obj-$(CONFIG_AUDIT) += audit.o
41compat-obj-$(CONFIG_AUDIT) += compat_audit.o 39compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 530ae0e8e38..aeeaf896be9 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,8 +9,8 @@
9#include <linux/kbuild.h> 9#include <linux/kbuild.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <asm/vdso.h> 11#include <asm/vdso.h>
12#include <asm/sigp.h>
13#include <asm/pgtable.h> 12#include <asm/pgtable.h>
13#include <asm/system.h>
14 14
15/* 15/*
16 * Make sure that the compiler is new enough. We want a compiler that 16 * Make sure that the compiler is new enough. We want a compiler that
@@ -70,12 +70,6 @@ int main(void)
70 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); 70 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
71 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 71 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
72 BLANK(); 72 BLANK();
73 /* constants for SIGP */
74 DEFINE(__SIGP_STOP, sigp_stop);
75 DEFINE(__SIGP_RESTART, sigp_restart);
76 DEFINE(__SIGP_SENSE, sigp_sense);
77 DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
78 BLANK();
79 /* lowcore offsets */ 73 /* lowcore offsets */
80 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); 74 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
81 DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr)); 75 DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
@@ -95,20 +89,19 @@ int main(void)
95 DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); 89 DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
96 DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); 90 DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
97 DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); 91 DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
98 DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
99 BLANK();
100 DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
101 DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); 92 DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
102 DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); 93 DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
103 DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); 94 DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
104 DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); 95 DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
105 DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); 96 DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
106 DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); 97 DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
98 DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
107 DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); 99 DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
108 DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); 100 DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
109 DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); 101 DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
110 DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); 102 DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
111 DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); 103 DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
104 BLANK();
112 DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); 105 DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
113 DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); 106 DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
114 DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); 107 DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
@@ -129,12 +122,16 @@ int main(void)
129 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 122 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
130 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 123 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
131 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 124 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
125 DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
126 DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
132 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); 127 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
133 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 128 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
134 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 129 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
135 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 130 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
136 DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); 131 DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
137 DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); 132 DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
133 DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
134 BLANK();
138 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); 135 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
139 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); 136 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
140 DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); 137 DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5f437b830da..6143521a4ff 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/entry.S 2 * arch/s390/kernel/entry.S
3 * S390 low-level entry points. 3 * S390 low-level entry points.
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright (C) IBM Corp. 1999,2012
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -691,77 +691,30 @@ mcck_panic:
6910: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 6910: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
692 j mcck_skip 692 j mcck_skip
693 693
694/*
695 * Restart interruption handler, kick starter for additional CPUs
696 */
697#ifdef CONFIG_SMP
698 __CPUINIT
699ENTRY(restart_int_handler)
700 basr %r1,0
701restart_base:
702 spt restart_vtime-restart_base(%r1)
703 stck __LC_LAST_UPDATE_CLOCK
704 mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
705 mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
706 l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
707 lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
708 lam %a0,%a15,__LC_AREGS_SAVE_AREA
709 lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone
710 l %r1,__LC_THREAD_INFO
711 mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
712 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
713 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
714 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
715 basr %r14,0
716 l %r14,restart_addr-.(%r14)
717 basr %r14,%r14 # call start_secondary
718restart_addr:
719 .long start_secondary
720 .align 8
721restart_vtime:
722 .long 0x7fffffff,0xffffffff
723 .previous
724#else
725/*
726 * If we do not run with SMP enabled, let the new CPU crash ...
727 */
728ENTRY(restart_int_handler)
729 basr %r1,0
730restart_base:
731 lpsw restart_crash-restart_base(%r1)
732 .align 8
733restart_crash:
734 .long 0x000a0000,0x00000000
735restart_go:
736#endif
737
738# 694#
739# PSW restart interrupt handler 695# PSW restart interrupt handler
740# 696#
741ENTRY(psw_restart_int_handler) 697ENTRY(restart_int_handler)
742 st %r15,__LC_SAVE_AREA_RESTART 698 st %r15,__LC_SAVE_AREA_RESTART
743 basr %r15,0 699 l %r15,__LC_RESTART_STACK
7440: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
745 l %r15,0(%r15)
746 ahi %r15,-__PT_SIZE # create pt_regs on stack 700 ahi %r15,-__PT_SIZE # create pt_regs on stack
701 xc 0(__PT_SIZE,%r15),0(%r15)
747 stm %r0,%r14,__PT_R0(%r15) 702 stm %r0,%r14,__PT_R0(%r15)
748 mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART 703 mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
749 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw 704 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
750 ahi %r15,-STACK_FRAME_OVERHEAD 705 ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
751 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 706 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
752 basr %r14,0 707 lm %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
7531: l %r14,.Ldo_restart-1b(%r14) 708 ltr %r3,%r3 # test source cpu address
754 basr %r14,%r14 709 jm 1f # negative -> skip source stop
755 basr %r14,0 # load disabled wait PSW if 7100: sigp %r4,%r3,1 # sigp sense to source cpu
7562: lpsw restart_psw_crash-2b(%r14) # do_restart returns 711 brc 10,0b # wait for status stored
757 .align 4 7121: basr %r14,%r1 # call function
758.Ldo_restart: 713 stap __SF_EMPTY(%r15) # store cpu address
759 .long do_restart 714 lh %r3,__SF_EMPTY(%r15)
760.Lrestart_stack: 7152: sigp %r4,%r3,5 # sigp stop to current cpu
761 .long restart_stack 716 brc 2,2b
762 .align 8 7173: j 3b
763restart_psw_crash:
764 .long 0x000a0000,0x00000000 + restart_psw_crash
765 718
766 .section .kprobes.text, "ax" 719 .section .kprobes.text, "ax"
767 720
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index bf538aaf407..92b1617d0c9 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -9,6 +9,14 @@
9extern void (*pgm_check_table[128])(struct pt_regs *); 9extern void (*pgm_check_table[128])(struct pt_regs *);
10extern void *restart_stack; 10extern void *restart_stack;
11 11
12void system_call(void);
13void pgm_check_handler(void);
14void ext_int_handler(void);
15void io_int_handler(void);
16void mcck_int_handler(void);
17void restart_int_handler(void);
18void restart_call_handler(void);
19
12asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 20asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
13asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); 21asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
14 22
@@ -26,7 +34,6 @@ void do_notify_resume(struct pt_regs *regs);
26 34
27void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); 35void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
28void do_restart(void); 36void do_restart(void);
29int __cpuinit start_secondary(void *cpuvoid);
30void __init startup_init(void); 37void __init startup_init(void);
31void die(struct pt_regs *regs, const char *str); 38void die(struct pt_regs *regs, const char *str);
32 39
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index bacbd2848d4..e33789a4575 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/entry64.S 2 * arch/s390/kernel/entry64.S
3 * S390 low-level entry points. 3 * S390 low-level entry points.
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2010 5 * Copyright (C) IBM Corp. 1999,2012
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -713,68 +713,30 @@ mcck_panic:
7130: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 7130: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
714 j mcck_skip 714 j mcck_skip
715 715
716/*
717 * Restart interruption handler, kick starter for additional CPUs
718 */
719#ifdef CONFIG_SMP
720 __CPUINIT
721ENTRY(restart_int_handler)
722 basr %r1,0
723restart_base:
724 spt restart_vtime-restart_base(%r1)
725 stck __LC_LAST_UPDATE_CLOCK
726 mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
727 mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
728 lghi %r10,__LC_GPREGS_SAVE_AREA
729 lg %r15,120(%r10) # load ksp
730 lghi %r10,__LC_CREGS_SAVE_AREA
731 lctlg %c0,%c15,0(%r10) # get new ctl regs
732 lghi %r10,__LC_AREGS_SAVE_AREA
733 lam %a0,%a15,0(%r10)
734 lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone
735 lg %r1,__LC_THREAD_INFO
736 mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
737 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
738 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
739 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
740 brasl %r14,start_secondary
741 .align 8
742restart_vtime:
743 .long 0x7fffffff,0xffffffff
744 .previous
745#else
746/*
747 * If we do not run with SMP enabled, let the new CPU crash ...
748 */
749ENTRY(restart_int_handler)
750 basr %r1,0
751restart_base:
752 lpswe restart_crash-restart_base(%r1)
753 .align 8
754restart_crash:
755 .long 0x000a0000,0x00000000,0x00000000,0x00000000
756restart_go:
757#endif
758
759# 716#
760# PSW restart interrupt handler 717# PSW restart interrupt handler
761# 718#
762ENTRY(psw_restart_int_handler) 719ENTRY(restart_int_handler)
763 stg %r15,__LC_SAVE_AREA_RESTART 720 stg %r15,__LC_SAVE_AREA_RESTART
764 larl %r15,restart_stack # load restart stack 721 lg %r15,__LC_RESTART_STACK
765 lg %r15,0(%r15)
766 aghi %r15,-__PT_SIZE # create pt_regs on stack 722 aghi %r15,-__PT_SIZE # create pt_regs on stack
723 xc 0(__PT_SIZE,%r15),0(%r15)
767 stmg %r0,%r14,__PT_R0(%r15) 724 stmg %r0,%r14,__PT_R0(%r15)
768 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 725 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
769 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw 726 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
770 aghi %r15,-STACK_FRAME_OVERHEAD 727 aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
771 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 728 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
772 brasl %r14,do_restart 729 lmg %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
773 larl %r14,restart_psw_crash # load disabled wait PSW if 730 ltgr %r3,%r3 # test source cpu address
774 lpswe 0(%r14) # do_restart returns 731 jm 1f # negative -> skip source stop
775 .align 8 7320: sigp %r4,%r3,1 # sigp sense to source cpu
776restart_psw_crash: 733 brc 10,0b # wait for status stored
777 .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash 7341: basr %r14,%r1 # call function
735 stap __SF_EMPTY(%r15) # store cpu address
736 llgh %r3,__SF_EMPTY(%r15)
7372: sigp %r4,%r3,5 # sigp stop to current cpu
738 brc 2,2b
7393: j 3b
778 740
779 .section .kprobes.text, "ax" 741 .section .kprobes.text, "ax"
780 742
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index affa8e68124..e5a72a2b0c5 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/ipl.c 2 * arch/s390/kernel/ipl.c
3 * ipl/reipl/dump support for Linux on s390. 3 * ipl/reipl/dump support for Linux on s390.
4 * 4 *
5 * Copyright IBM Corp. 2005,2007 5 * Copyright IBM Corp. 2005,2012
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 * Volker Sameske <sameske@de.ibm.com> 8 * Volker Sameske <sameske@de.ibm.com>
@@ -25,7 +25,6 @@
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/reset.h> 26#include <asm/reset.h>
27#include <asm/sclp.h> 27#include <asm/sclp.h>
28#include <asm/sigp.h>
29#include <asm/checksum.h> 28#include <asm/checksum.h>
30#include "entry.h" 29#include "entry.h"
31 30
@@ -571,7 +570,7 @@ static void __ipl_run(void *unused)
571 570
572static void ipl_run(struct shutdown_trigger *trigger) 571static void ipl_run(struct shutdown_trigger *trigger)
573{ 572{
574 smp_switch_to_ipl_cpu(__ipl_run, NULL); 573 smp_call_ipl_cpu(__ipl_run, NULL);
575} 574}
576 575
577static int __init ipl_init(void) 576static int __init ipl_init(void)
@@ -1101,7 +1100,7 @@ static void __reipl_run(void *unused)
1101 1100
1102static void reipl_run(struct shutdown_trigger *trigger) 1101static void reipl_run(struct shutdown_trigger *trigger)
1103{ 1102{
1104 smp_switch_to_ipl_cpu(__reipl_run, NULL); 1103 smp_call_ipl_cpu(__reipl_run, NULL);
1105} 1104}
1106 1105
1107static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) 1106static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
@@ -1421,7 +1420,7 @@ static void dump_run(struct shutdown_trigger *trigger)
1421 if (dump_method == DUMP_METHOD_NONE) 1420 if (dump_method == DUMP_METHOD_NONE)
1422 return; 1421 return;
1423 smp_send_stop(); 1422 smp_send_stop();
1424 smp_switch_to_ipl_cpu(__dump_run, NULL); 1423 smp_call_ipl_cpu(__dump_run, NULL);
1425} 1424}
1426 1425
1427static int __init dump_ccw_init(void) 1426static int __init dump_ccw_init(void)
@@ -1623,9 +1622,7 @@ static void stop_run(struct shutdown_trigger *trigger)
1623 if (strcmp(trigger->name, ON_PANIC_STR) == 0 || 1622 if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
1624 strcmp(trigger->name, ON_RESTART_STR) == 0) 1623 strcmp(trigger->name, ON_RESTART_STR) == 0)
1625 disabled_wait((unsigned long) __builtin_return_address(0)); 1624 disabled_wait((unsigned long) __builtin_return_address(0));
1626 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 1625 smp_stop_cpu();
1627 cpu_relax();
1628 for (;;);
1629} 1626}
1630 1627
1631static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, 1628static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
@@ -1738,9 +1735,8 @@ static ssize_t on_restart_store(struct kobject *kobj,
1738static struct kobj_attribute on_restart_attr = 1735static struct kobj_attribute on_restart_attr =
1739 __ATTR(on_restart, 0644, on_restart_show, on_restart_store); 1736 __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
1740 1737
1741void do_restart(void) 1738static void __do_restart(void *ignore)
1742{ 1739{
1743 smp_restart_with_online_cpu();
1744 smp_send_stop(); 1740 smp_send_stop();
1745#ifdef CONFIG_CRASH_DUMP 1741#ifdef CONFIG_CRASH_DUMP
1746 crash_kexec(NULL); 1742 crash_kexec(NULL);
@@ -1749,6 +1745,11 @@ void do_restart(void)
1749 stop_run(&on_restart_trigger); 1745 stop_run(&on_restart_trigger);
1750} 1746}
1751 1747
1748void do_restart(void)
1749{
1750 smp_call_online_cpu(__do_restart, NULL);
1751}
1752
1752/* on halt */ 1753/* on halt */
1753 1754
1754static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; 1755static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 47b168fb29c..bf6fbc03eba 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -49,50 +49,21 @@ static void add_elf_notes(int cpu)
49} 49}
50 50
51/* 51/*
52 * Store status of next available physical CPU
53 */
54static int store_status_next(int start_cpu, int this_cpu)
55{
56 struct save_area *sa = (void *) 4608 + store_prefix();
57 int cpu, rc;
58
59 for (cpu = start_cpu; cpu < 65536; cpu++) {
60 if (cpu == this_cpu)
61 continue;
62 do {
63 rc = raw_sigp(cpu, sigp_stop_and_store_status);
64 } while (rc == sigp_busy);
65 if (rc != sigp_order_code_accepted)
66 continue;
67 if (sa->pref_reg)
68 return cpu;
69 }
70 return -1;
71}
72
73/*
74 * Initialize CPU ELF notes 52 * Initialize CPU ELF notes
75 */ 53 */
76void setup_regs(void) 54void setup_regs(void)
77{ 55{
78 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; 56 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
79 int cpu, this_cpu, phys_cpu = 0, first = 1; 57 int cpu, this_cpu;
80 58
81 this_cpu = stap(); 59 this_cpu = smp_find_processor_id(stap());
82 60 add_elf_notes(this_cpu);
83 if (!S390_lowcore.prefixreg_save_area)
84 first = 0;
85 for_each_online_cpu(cpu) { 61 for_each_online_cpu(cpu) {
86 if (first) { 62 if (cpu == this_cpu)
87 add_elf_notes(cpu); 63 continue;
88 first = 0; 64 if (smp_store_status(cpu))
89 continue; 65 continue;
90 }
91 phys_cpu = store_status_next(phys_cpu, this_cpu);
92 if (phys_cpu == -1)
93 break;
94 add_elf_notes(cpu); 66 add_elf_notes(cpu);
95 phys_cpu++;
96 } 67 }
97 /* Copy dump CPU store status info to absolute zero */ 68 /* Copy dump CPU store status info to absolute zero */
98 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); 69 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
@@ -255,5 +226,5 @@ void machine_kexec(struct kimage *image)
255 return; 226 return;
256 tracer_disable(); 227 tracer_disable();
257 smp_send_stop(); 228 smp_send_stop();
258 smp_switch_to_ipl_cpu(__machine_kexec, image); 229 smp_call_ipl_cpu(__machine_kexec, image);
259} 230}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 778c100fe31..9a3edb5f2c9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/setup.c 2 * arch/s390/kernel/setup.c
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright (C) IBM Corp. 1999,2010 5 * Copyright (C) IBM Corp. 1999,2012
6 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * 8 *
@@ -62,6 +62,7 @@
62#include <asm/ebcdic.h> 62#include <asm/ebcdic.h>
63#include <asm/kvm_virtio.h> 63#include <asm/kvm_virtio.h>
64#include <asm/diag.h> 64#include <asm/diag.h>
65#include "entry.h"
65 66
66long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | 67long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
67 PSW_MASK_EA | PSW_MASK_BA; 68 PSW_MASK_EA | PSW_MASK_BA;
@@ -351,8 +352,9 @@ static void setup_addressing_mode(void)
351 } 352 }
352} 353}
353 354
354static void __init 355void *restart_stack __attribute__((__section__(".data")));
355setup_lowcore(void) 356
357static void __init setup_lowcore(void)
356{ 358{
357 struct _lowcore *lc; 359 struct _lowcore *lc;
358 360
@@ -363,7 +365,7 @@ setup_lowcore(void)
363 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 365 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
364 lc->restart_psw.mask = psw_kernel_bits; 366 lc->restart_psw.mask = psw_kernel_bits;
365 lc->restart_psw.addr = 367 lc->restart_psw.addr =
366 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 368 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
367 lc->external_new_psw.mask = psw_kernel_bits | 369 lc->external_new_psw.mask = psw_kernel_bits |
368 PSW_MASK_DAT | PSW_MASK_MCHECK; 370 PSW_MASK_DAT | PSW_MASK_MCHECK;
369 lc->external_new_psw.addr = 371 lc->external_new_psw.addr =
@@ -412,6 +414,24 @@ setup_lowcore(void)
412 lc->last_update_timer = S390_lowcore.last_update_timer; 414 lc->last_update_timer = S390_lowcore.last_update_timer;
413 lc->last_update_clock = S390_lowcore.last_update_clock; 415 lc->last_update_clock = S390_lowcore.last_update_clock;
414 lc->ftrace_func = S390_lowcore.ftrace_func; 416 lc->ftrace_func = S390_lowcore.ftrace_func;
417
418 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
419 restart_stack += ASYNC_SIZE;
420
421 /*
422 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
 423 * restart data to the absolute zero lowcore. This is necessary if
424 * PSW restart is done on an offline CPU that has lowcore zero.
425 */
426 lc->restart_stack = (unsigned long) restart_stack;
427 lc->restart_fn = (unsigned long) do_restart;
428 lc->restart_data = 0;
429 lc->restart_source = -1UL;
430 memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
431 4*sizeof(unsigned long));
432 copy_to_absolute_zero(&S390_lowcore.restart_psw,
433 &lc->restart_psw, sizeof(psw_t));
434
415 set_prefix((u32)(unsigned long) lc); 435 set_prefix((u32)(unsigned long) lc);
416 lowcore_ptr[0] = lc; 436 lowcore_ptr[0] = lc;
417} 437}
@@ -572,27 +592,6 @@ static void __init setup_memory_end(void)
572 } 592 }
573} 593}
574 594
575void *restart_stack __attribute__((__section__(".data")));
576
577/*
578 * Setup new PSW and allocate stack for PSW restart interrupt
579 */
580static void __init setup_restart_psw(void)
581{
582 psw_t psw;
583
584 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
585 restart_stack += ASYNC_SIZE;
586
587 /*
 588 * Setup restart PSW for absolute zero lowcore. This is necessary
589 * if PSW restart is done on an offline CPU that has lowcore zero
590 */
591 psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
592 psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
593 copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
594}
595
596static void __init setup_vmcoreinfo(void) 595static void __init setup_vmcoreinfo(void)
597{ 596{
598#ifdef CONFIG_KEXEC 597#ifdef CONFIG_KEXEC
@@ -782,8 +781,7 @@ static void __init reserve_crashkernel(void)
782#endif 781#endif
783} 782}
784 783
785static void __init 784static void __init setup_memory(void)
786setup_memory(void)
787{ 785{
788 unsigned long bootmap_size; 786 unsigned long bootmap_size;
789 unsigned long start_pfn, end_pfn; 787 unsigned long start_pfn, end_pfn;
@@ -1014,8 +1012,7 @@ static void __init setup_hwcaps(void)
1014 * was printed. 1012 * was printed.
1015 */ 1013 */
1016 1014
1017void __init 1015void __init setup_arch(char **cmdline_p)
1018setup_arch(char **cmdline_p)
1019{ 1016{
1020 /* 1017 /*
1021 * print what head.S has found out about the machine 1018 * print what head.S has found out about the machine
@@ -1068,7 +1065,6 @@ setup_arch(char **cmdline_p)
1068 setup_memory(); 1065 setup_memory();
1069 setup_resources(); 1066 setup_resources();
1070 setup_vmcoreinfo(); 1067 setup_vmcoreinfo();
1071 setup_restart_psw();
1072 setup_lowcore(); 1068 setup_lowcore();
1073 1069
1074 cpu_init(); 1070 cpu_init();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2398ce6b15a..6db8526a602 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,23 +1,18 @@
1/* 1/*
2 * arch/s390/kernel/smp.c 2 * SMP related functions
3 * 3 *
4 * Copyright IBM Corp. 1999, 2009 4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens (heiko.carstens@de.ibm.com) 7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 * 8 *
9 * based on other smp stuff by 9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar 11 * (c) 1998 Ingo Molnar
12 * 12 *
13 * We work with logical cpu numbering everywhere we can. The only 13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * functions using the real cpu address (got from STAP) are the sigp 14 * the translation of logical to physical cpu ids. All new code that
15 * functions. For all other functions we use the identity mapping. 15 * operates on physical cpu numbers needs to go into smp.c.
16 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
17 * used e.g. to find the idle task belonging to a logical cpu. Every array
18 * in the kernel is sorted by the logical cpu number and not by the physical
19 * one which is causing all the confusion with __cpu_logical_map and
20 * cpu_number_map in other architectures.
21 */ 16 */
22 17
23#define KMSG_COMPONENT "cpu" 18#define KMSG_COMPONENT "cpu"
@@ -31,140 +26,381 @@
31#include <linux/spinlock.h> 26#include <linux/spinlock.h>
32#include <linux/kernel_stat.h> 27#include <linux/kernel_stat.h>
33#include <linux/delay.h> 28#include <linux/delay.h>
34#include <linux/cache.h>
35#include <linux/interrupt.h> 29#include <linux/interrupt.h>
36#include <linux/irqflags.h> 30#include <linux/irqflags.h>
37#include <linux/cpu.h> 31#include <linux/cpu.h>
38#include <linux/timex.h>
39#include <linux/bootmem.h>
40#include <linux/slab.h> 32#include <linux/slab.h>
41#include <linux/crash_dump.h> 33#include <linux/crash_dump.h>
42#include <asm/asm-offsets.h> 34#include <asm/asm-offsets.h>
43#include <asm/ipl.h> 35#include <asm/ipl.h>
44#include <asm/setup.h> 36#include <asm/setup.h>
45#include <asm/sigp.h>
46#include <asm/pgalloc.h>
47#include <asm/irq.h> 37#include <asm/irq.h>
48#include <asm/cpcmd.h>
49#include <asm/tlbflush.h> 38#include <asm/tlbflush.h>
50#include <asm/timer.h> 39#include <asm/timer.h>
51#include <asm/lowcore.h> 40#include <asm/lowcore.h>
52#include <asm/sclp.h> 41#include <asm/sclp.h>
53#include <asm/cputime.h>
54#include <asm/vdso.h> 42#include <asm/vdso.h>
55#include <asm/cpu.h>
56#include "entry.h" 43#include "entry.h"
57 44
58/* logical cpu to cpu address */ 45enum {
59unsigned short __cpu_logical_map[NR_CPUS]; 46 sigp_sense = 1,
47 sigp_external_call = 2,
48 sigp_emergency_signal = 3,
49 sigp_start = 4,
50 sigp_stop = 5,
51 sigp_restart = 6,
52 sigp_stop_and_store_status = 9,
53 sigp_initial_cpu_reset = 11,
54 sigp_cpu_reset = 12,
55 sigp_set_prefix = 13,
56 sigp_store_status_at_address = 14,
57 sigp_store_extended_status_at_address = 15,
58 sigp_set_architecture = 18,
59 sigp_conditional_emergency_signal = 19,
60 sigp_sense_running = 21,
61};
60 62
61static struct task_struct *current_set[NR_CPUS]; 63enum {
64 sigp_order_code_accepted = 0,
65 sigp_status_stored = 1,
66 sigp_busy = 2,
67 sigp_not_operational = 3,
68};
62 69
63static u8 smp_cpu_type; 70enum {
64static int smp_use_sigp_detection; 71 ec_schedule = 0,
72 ec_call_function,
73 ec_call_function_single,
74 ec_stop_cpu,
75};
65 76
66enum s390_cpu_state { 77enum {
67 CPU_STATE_STANDBY, 78 CPU_STATE_STANDBY,
68 CPU_STATE_CONFIGURED, 79 CPU_STATE_CONFIGURED,
69}; 80};
70 81
82struct pcpu {
83 struct cpu cpu;
84 struct task_struct *idle; /* idle process for the cpu */
85 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
86 unsigned long async_stack; /* async stack for the cpu */
87 unsigned long panic_stack; /* panic stack for the cpu */
88 unsigned long ec_mask; /* bit mask for ec_xxx functions */
89 int state; /* physical cpu state */
90 u32 status; /* last status received via sigp */
91 u16 address; /* physical cpu address */
92};
93
94static u8 boot_cpu_type;
95static u16 boot_cpu_address;
96static struct pcpu pcpu_devices[NR_CPUS];
97
71DEFINE_MUTEX(smp_cpu_state_mutex); 98DEFINE_MUTEX(smp_cpu_state_mutex);
72static int smp_cpu_state[NR_CPUS];
73 99
74static DEFINE_PER_CPU(struct cpu, cpu_devices); 100/*
101 * Signal processor helper functions.
102 */
103static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
104{
105 register unsigned int reg1 asm ("1") = parm;
106 int cc;
75 107
76static void smp_ext_bitcall(int, int); 108 asm volatile(
109 " sigp %1,%2,0(%3)\n"
110 " ipm %0\n"
111 " srl %0,28\n"
112 : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
113 if (status && cc == 1)
114 *status = reg1;
115 return cc;
116}
77 117
78static int raw_cpu_stopped(int cpu) 118static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
79{ 119{
80 u32 status; 120 int cc;
81 121
82 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { 122 while (1) {
83 case sigp_status_stored: 123 cc = __pcpu_sigp(addr, order, parm, status);
84 /* Check for stopped and check stop state */ 124 if (cc != sigp_busy)
85 if (status & 0x50) 125 return cc;
86 return 1; 126 cpu_relax();
87 break;
88 default:
89 break;
90 } 127 }
91 return 0;
92} 128}
93 129
94static inline int cpu_stopped(int cpu) 130static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
95{ 131{
96 return raw_cpu_stopped(cpu_logical_map(cpu)); 132 int cc, retry;
133
134 for (retry = 0; ; retry++) {
135 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
136 if (cc != sigp_busy)
137 break;
138 if (retry >= 3)
139 udelay(10);
140 }
141 return cc;
142}
143
144static inline int pcpu_stopped(struct pcpu *pcpu)
145{
146 if (__pcpu_sigp(pcpu->address, sigp_sense,
147 0, &pcpu->status) != sigp_status_stored)
148 return 0;
149 /* Check for stopped and check stop state */
150 return !!(pcpu->status & 0x50);
151}
152
153static inline int pcpu_running(struct pcpu *pcpu)
154{
155 if (__pcpu_sigp(pcpu->address, sigp_sense_running,
156 0, &pcpu->status) != sigp_status_stored)
157 return 1;
158 /* Check for running status */
159 return !(pcpu->status & 0x400);
97} 160}
98 161
99/* 162/*
100 * Ensure that PSW restart is done on an online CPU 163 * Find struct pcpu by cpu address.
101 */ 164 */
102void smp_restart_with_online_cpu(void) 165static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
103{ 166{
104 int cpu; 167 int cpu;
105 168
106 for_each_online_cpu(cpu) { 169 for_each_cpu(cpu, mask)
107 if (stap() == __cpu_logical_map[cpu]) { 170 if (pcpu_devices[cpu].address == address)
108 /* We are online: Enable DAT again and return */ 171 return pcpu_devices + cpu;
109 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 172 return NULL;
110 return; 173}
111 } 174
175static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
176{
177 int order;
178
179 set_bit(ec_bit, &pcpu->ec_mask);
180 order = pcpu_running(pcpu) ?
181 sigp_external_call : sigp_emergency_signal;
182 pcpu_sigp_retry(pcpu, order, 0);
183}
184
185static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
186{
187 struct _lowcore *lc;
188
189 if (pcpu != &pcpu_devices[0]) {
190 pcpu->lowcore = (struct _lowcore *)
191 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
192 pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
193 pcpu->panic_stack = __get_free_page(GFP_KERNEL);
194 if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
195 goto out;
112 } 196 }
113 /* We are not online: Do PSW restart on an online CPU */ 197 lc = pcpu->lowcore;
114 while (sigp(cpu, sigp_restart) == sigp_busy) 198 memcpy(lc, &S390_lowcore, 512);
115 cpu_relax(); 199 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
116 /* And stop ourself */ 200 lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
117 while (raw_sigp(stap(), sigp_stop) == sigp_busy) 201 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
118 cpu_relax(); 202 lc->cpu_nr = cpu;
119 for (;;); 203#ifndef CONFIG_64BIT
204 if (MACHINE_HAS_IEEE) {
205 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
206 if (!lc->extended_save_area_addr)
207 goto out;
208 }
209#else
210 if (vdso_alloc_per_cpu(lc))
211 goto out;
212#endif
213 lowcore_ptr[cpu] = lc;
214 pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
215 return 0;
216out:
217 if (pcpu != &pcpu_devices[0]) {
218 free_page(pcpu->panic_stack);
219 free_pages(pcpu->async_stack, ASYNC_ORDER);
220 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
221 }
222 return -ENOMEM;
120} 223}
121 224
122void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) 225static void pcpu_free_lowcore(struct pcpu *pcpu)
123{ 226{
124 struct _lowcore *lc, *current_lc; 227 pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
125 struct stack_frame *sf; 228 lowcore_ptr[pcpu - pcpu_devices] = NULL;
126 struct pt_regs *regs; 229#ifndef CONFIG_64BIT
127 unsigned long sp; 230 if (MACHINE_HAS_IEEE) {
128 231 struct _lowcore *lc = pcpu->lowcore;
129 if (smp_processor_id() == 0) 232
130 func(data); 233 free_page((unsigned long) lc->extended_save_area_addr);
131 __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | 234 lc->extended_save_area_addr = 0;
132 PSW_MASK_EA | PSW_MASK_BA); 235 }
133 /* Disable lowcore protection */ 236#else
134 __ctl_clear_bit(0, 28); 237 vdso_free_per_cpu(pcpu->lowcore);
135 current_lc = lowcore_ptr[smp_processor_id()]; 238#endif
136 lc = lowcore_ptr[0]; 239 if (pcpu != &pcpu_devices[0]) {
137 if (!lc) 240 free_page(pcpu->panic_stack);
138 lc = current_lc; 241 free_pages(pcpu->async_stack, ASYNC_ORDER);
139 lc->restart_psw.mask = 242 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
140 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; 243 }
141 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; 244}
142 if (!cpu_online(0)) 245
143 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); 246static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
144 while (sigp(0, sigp_stop_and_store_status) == sigp_busy) 247{
145 cpu_relax(); 248 struct _lowcore *lc = pcpu->lowcore;
146 sp = lc->panic_stack; 249
147 sp -= sizeof(struct pt_regs); 250 atomic_inc(&init_mm.context.attach_count);
148 regs = (struct pt_regs *) sp; 251 lc->cpu_nr = cpu;
149 memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs)); 252 lc->percpu_offset = __per_cpu_offset[cpu];
150 regs->psw = current_lc->psw_save_area; 253 lc->kernel_asce = S390_lowcore.kernel_asce;
151 sp -= STACK_FRAME_OVERHEAD; 254 lc->machine_flags = S390_lowcore.machine_flags;
152 sf = (struct stack_frame *) sp; 255 lc->ftrace_func = S390_lowcore.ftrace_func;
153 sf->back_chain = 0; 256 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
154 smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); 257 __ctl_store(lc->cregs_save_area, 0, 15);
258 save_access_regs((unsigned int *) lc->access_regs_save_area);
259 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
260 MAX_FACILITY_BIT/8);
261}
262
263static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
264{
265 struct _lowcore *lc = pcpu->lowcore;
266 struct thread_info *ti = task_thread_info(tsk);
267
268 lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
269 lc->thread_info = (unsigned long) task_thread_info(tsk);
270 lc->current_task = (unsigned long) tsk;
271 lc->user_timer = ti->user_timer;
272 lc->system_timer = ti->system_timer;
273 lc->steal_timer = 0;
274}
275
276static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
277{
278 struct _lowcore *lc = pcpu->lowcore;
279
280 lc->restart_stack = lc->kernel_stack;
281 lc->restart_fn = (unsigned long) func;
282 lc->restart_data = (unsigned long) data;
283 lc->restart_source = -1UL;
284 pcpu_sigp_retry(pcpu, sigp_restart, 0);
285}
286
287/*
288 * Call function via PSW restart on pcpu and stop the current cpu.
289 */
290static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
291 void *data, unsigned long stack)
292{
293 struct _lowcore *lc = pcpu->lowcore;
294 unsigned short this_cpu;
295
296 __load_psw_mask(psw_kernel_bits);
297 this_cpu = stap();
298 if (pcpu->address == this_cpu)
299 func(data); /* should not return */
300 /* Stop target cpu (if func returns this stops the current cpu). */
301 pcpu_sigp_retry(pcpu, sigp_stop, 0);
302 /* Restart func on the target cpu and stop the current cpu. */
303 lc->restart_stack = stack;
304 lc->restart_fn = (unsigned long) func;
305 lc->restart_data = (unsigned long) data;
306 lc->restart_source = (unsigned long) this_cpu;
307 asm volatile(
308 "0: sigp 0,%0,6 # sigp restart to target cpu\n"
309 " brc 2,0b # busy, try again\n"
310 "1: sigp 0,%1,5 # sigp stop to current cpu\n"
311 " brc 2,1b # busy, try again\n"
312 : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
313 for (;;) ;
314}
315
316/*
317 * Call function on an online CPU.
318 */
319void smp_call_online_cpu(void (*func)(void *), void *data)
320{
321 struct pcpu *pcpu;
322
323 /* Use the current cpu if it is online. */
324 pcpu = pcpu_find_address(cpu_online_mask, stap());
325 if (!pcpu)
326 /* Use the first online cpu. */
327 pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
328 pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
329}
330
331/*
332 * Call function on the ipl CPU.
333 */
334void smp_call_ipl_cpu(void (*func)(void *), void *data)
335{
336 pcpu_delegate(&pcpu_devices[0], func, data, pcpu_devices->panic_stack);
337}
338
339int smp_find_processor_id(u16 address)
340{
341 int cpu;
342
343 for_each_present_cpu(cpu)
344 if (pcpu_devices[cpu].address == address)
345 return cpu;
346 return -1;
155} 347}
156 348
157static void smp_stop_cpu(void) 349int smp_vcpu_scheduled(int cpu)
158{ 350{
159 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 351 return pcpu_running(pcpu_devices + cpu);
352}
353
354void smp_yield(void)
355{
356 if (MACHINE_HAS_DIAG44)
357 asm volatile("diag 0,0,0x44");
358}
359
360void smp_yield_cpu(int cpu)
361{
362 if (MACHINE_HAS_DIAG9C)
363 asm volatile("diag %0,0,0x9c"
364 : : "d" (pcpu_devices[cpu].address));
365 else if (MACHINE_HAS_DIAG44)
366 asm volatile("diag 0,0,0x44");
367}
368
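As background for the two diagnose calls above: diag 0x44 yields the rest of the virtual cpu's timeslice to the hypervisor, and diag 0x9c directs the yield at a specific physical cpu address. A rough user-space analogue, assuming only POSIX, is sched_yield(); the directed variant has no portable equivalent, so it is only noted in a comment.

    /* User-space analogue of smp_yield (assumes POSIX sched_yield). */
    #include <sched.h>

    int main(void)
    {
            /* ~ diag 0x44: give the rest of the timeslice back */
            sched_yield();
            /* diag 0x9c (yield directed at one cpu) has no portable analogue */
            return 0;
    }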
369/*
 370 * Send the emergency shutdown signal to all cpus in the mask. This
 371 * gives them the opportunity to complete outstanding interrupts.
372 */
373void smp_emergency_stop(cpumask_t *cpumask)
374{
375 u64 end;
376 int cpu;
377
378 end = get_clock() + (1000000UL << 12);
379 for_each_cpu(cpu, cpumask) {
380 struct pcpu *pcpu = pcpu_devices + cpu;
381 set_bit(ec_stop_cpu, &pcpu->ec_mask);
382 while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
383 0, NULL) == sigp_busy &&
384 get_clock() < end)
385 cpu_relax();
386 }
387 while (get_clock() < end) {
388 for_each_cpu(cpu, cpumask)
389 if (pcpu_stopped(pcpu_devices + cpu))
390 cpumask_clear_cpu(cpu, cpumask);
391 if (cpumask_empty(cpumask))
392 break;
160 cpu_relax(); 393 cpu_relax();
394 }
161} 395}
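The timeout arithmetic in smp_emergency_stop relies on the s390 TOD clock format: bit 51 increments once per microsecond, so one microsecond equals 1 << 12 TOD units and 1000000UL << 12 is roughly one second. A one-line check of that conversion:

    /* TOD units: 1 microsecond == 1 << 12, so this prints 1.000000 s. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long timeout_tod = 1000000ULL << 12;
            printf("%f s\n", (double) timeout_tod / (1ULL << 12) / 1e6);
            return 0;
    }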
162 396
397/*
398 * Stop all cpus but the current one.
399 */
163void smp_send_stop(void) 400void smp_send_stop(void)
164{ 401{
165 cpumask_t cpumask; 402 cpumask_t cpumask;
166 int cpu; 403 int cpu;
167 u64 end;
168 404
169 /* Disable all interrupts/machine checks */ 405 /* Disable all interrupts/machine checks */
170 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 406 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
@@ -173,56 +409,46 @@ void smp_send_stop(void)
173 cpumask_copy(&cpumask, cpu_online_mask); 409 cpumask_copy(&cpumask, cpu_online_mask);
174 cpumask_clear_cpu(smp_processor_id(), &cpumask); 410 cpumask_clear_cpu(smp_processor_id(), &cpumask);
175 411
176 if (oops_in_progress) { 412 if (oops_in_progress)
177 /* 413 smp_emergency_stop(&cpumask);
178 * Give the other cpus the opportunity to complete
179 * outstanding interrupts before stopping them.
180 */
181 end = get_clock() + (1000000UL << 12);
182 for_each_cpu(cpu, &cpumask) {
183 set_bit(ec_stop_cpu, (unsigned long *)
184 &lowcore_ptr[cpu]->ext_call_fast);
185 while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
186 get_clock() < end)
187 cpu_relax();
188 }
189 while (get_clock() < end) {
190 for_each_cpu(cpu, &cpumask)
191 if (cpu_stopped(cpu))
192 cpumask_clear_cpu(cpu, &cpumask);
193 if (cpumask_empty(&cpumask))
194 break;
195 cpu_relax();
196 }
197 }
198 414
199 /* stop all processors */ 415 /* stop all processors */
200 for_each_cpu(cpu, &cpumask) { 416 for_each_cpu(cpu, &cpumask) {
201 while (sigp(cpu, sigp_stop) == sigp_busy) 417 struct pcpu *pcpu = pcpu_devices + cpu;
202 cpu_relax(); 418 pcpu_sigp_retry(pcpu, sigp_stop, 0);
203 while (!cpu_stopped(cpu)) 419 while (!pcpu_stopped(pcpu))
204 cpu_relax(); 420 cpu_relax();
205 } 421 }
206} 422}
207 423
208/* 424/*
425 * Stop the current cpu.
426 */
427void smp_stop_cpu(void)
428{
429 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
430 for (;;) ;
431}
432
433/*
209 * This is the main routine where commands issued by other 434 * This is the main routine where commands issued by other
210 * cpus are handled. 435 * cpus are handled.
211 */ 436 */
212
213static void do_ext_call_interrupt(unsigned int ext_int_code, 437static void do_ext_call_interrupt(unsigned int ext_int_code,
214 unsigned int param32, unsigned long param64) 438 unsigned int param32, unsigned long param64)
215{ 439{
216 unsigned long bits; 440 unsigned long bits;
441 int cpu;
217 442
443 cpu = smp_processor_id();
218 if ((ext_int_code & 0xffff) == 0x1202) 444 if ((ext_int_code & 0xffff) == 0x1202)
219 kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; 445 kstat_cpu(cpu).irqs[EXTINT_EXC]++;
220 else 446 else
221 kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; 447 kstat_cpu(cpu).irqs[EXTINT_EMS]++;
222 /* 448 /*
223 * handle bit signal external calls 449 * handle bit signal external calls
224 */ 450 */
225 bits = xchg(&S390_lowcore.ext_call_fast, 0); 451 bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
226 452
227 if (test_bit(ec_stop_cpu, &bits)) 453 if (test_bit(ec_stop_cpu, &bits))
228 smp_stop_cpu(); 454 smp_stop_cpu();
@@ -238,38 +464,17 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
238 464
239} 465}
240 466
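The ec_mask field read above acts as a per-cpu mailbox of pending service bits: senders set a bit and kick the cpu with a sigp, and the receiver atomically swaps the whole mask to zero and handles each bit exactly once. A minimal user-space model of that pattern, assuming C11 atomics:

    /* Bit-signal mailbox model (C11 atomics stand in for the kernel ops). */
    #include <stdatomic.h>
    #include <stdio.h>

    enum { ec_schedule, ec_call_function, ec_stop_cpu };

    static atomic_ulong ec_mask;

    static void send_bit(int bit)           /* sender side */
    {
            atomic_fetch_or(&ec_mask, 1UL << bit);
            /* the real sender follows up with sigp emergency signal or
             * external call to make the target process the mask */
    }

    static void handle_bits(void)           /* receiver side */
    {
            unsigned long bits = atomic_exchange(&ec_mask, 0);

            if (bits & (1UL << ec_stop_cpu))
                    printf("stop request\n");
            if (bits & (1UL << ec_call_function))
                    printf("call function request\n");
    }

    int main(void)
    {
            send_bit(ec_call_function);
            handle_bits();
            return 0;
    }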
241/*
242 * Send an external call sigp to another cpu and return without waiting
243 * for its completion.
244 */
245static void smp_ext_bitcall(int cpu, int sig)
246{
247 int order;
248
249 /*
250 * Set signaling bit in lowcore of target cpu and kick it
251 */
252 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
253 while (1) {
254 order = smp_vcpu_scheduled(cpu) ?
255 sigp_external_call : sigp_emergency_signal;
256 if (sigp(cpu, order) != sigp_busy)
257 break;
258 udelay(10);
259 }
260}
261
262void arch_send_call_function_ipi_mask(const struct cpumask *mask) 467void arch_send_call_function_ipi_mask(const struct cpumask *mask)
263{ 468{
264 int cpu; 469 int cpu;
265 470
266 for_each_cpu(cpu, mask) 471 for_each_cpu(cpu, mask)
267 smp_ext_bitcall(cpu, ec_call_function); 472 pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
268} 473}
269 474
270void arch_send_call_function_single_ipi(int cpu) 475void arch_send_call_function_single_ipi(int cpu)
271{ 476{
272 smp_ext_bitcall(cpu, ec_call_function_single); 477 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
273} 478}
274 479
275#ifndef CONFIG_64BIT 480#ifndef CONFIG_64BIT
@@ -295,15 +500,16 @@ EXPORT_SYMBOL(smp_ptlb_all);
295 */ 500 */
296void smp_send_reschedule(int cpu) 501void smp_send_reschedule(int cpu)
297{ 502{
298 smp_ext_bitcall(cpu, ec_schedule); 503 pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
299} 504}
300 505
301/* 506/*
302 * parameter area for the set/clear control bit callbacks 507 * parameter area for the set/clear control bit callbacks
303 */ 508 */
304struct ec_creg_mask_parms { 509struct ec_creg_mask_parms {
305 unsigned long orvals[16]; 510 unsigned long orval;
306 unsigned long andvals[16]; 511 unsigned long andval;
512 int cr;
307}; 513};
308 514
309/* 515/*
@@ -313,11 +519,9 @@ static void smp_ctl_bit_callback(void *info)
313{ 519{
314 struct ec_creg_mask_parms *pp = info; 520 struct ec_creg_mask_parms *pp = info;
315 unsigned long cregs[16]; 521 unsigned long cregs[16];
316 int i;
317 522
318 __ctl_store(cregs, 0, 15); 523 __ctl_store(cregs, 0, 15);
319 for (i = 0; i <= 15; i++) 524 cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
320 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
321 __ctl_load(cregs, 0, 15); 525 __ctl_load(cregs, 0, 15);
322} 526}
323 527
@@ -326,11 +530,8 @@ static void smp_ctl_bit_callback(void *info)
326 */ 530 */
327void smp_ctl_set_bit(int cr, int bit) 531void smp_ctl_set_bit(int cr, int bit)
328{ 532{
329 struct ec_creg_mask_parms parms; 533 struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
330 534
331 memset(&parms.orvals, 0, sizeof(parms.orvals));
332 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
333 parms.orvals[cr] = 1UL << bit;
334 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 535 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
335} 536}
336EXPORT_SYMBOL(smp_ctl_set_bit); 537EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -340,216 +541,175 @@ EXPORT_SYMBOL(smp_ctl_set_bit);
340 */ 541 */
341void smp_ctl_clear_bit(int cr, int bit) 542void smp_ctl_clear_bit(int cr, int bit)
342{ 543{
343 struct ec_creg_mask_parms parms; 544 struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
344 545
345 memset(&parms.orvals, 0, sizeof(parms.orvals));
346 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
347 parms.andvals[cr] = ~(1UL << bit);
348 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 546 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
349} 547}
350EXPORT_SYMBOL(smp_ctl_clear_bit); 548EXPORT_SYMBOL(smp_ctl_clear_bit);
351 549
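The reworked parameter block carries one or-mask and one and-mask for a single control register instead of sixteen of each, so applying it reduces to one masked read-modify-write. A self-contained model of the new semantics:

    /* Model of the single-register ec_creg_mask_parms application. */
    #include <stdio.h>

    struct creg_parms { unsigned long orval, andval; int cr; };

    static void apply(unsigned long cregs[16], const struct creg_parms *pp)
    {
            cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
    }

    int main(void)
    {
            unsigned long cregs[16] = { 0 };
            struct creg_parms set_bit10 = { 1UL << 10, -1UL, 0 };  /* set   */
            struct creg_parms clr_bit10 = { 0, ~(1UL << 10), 0 };  /* clear */

            apply(cregs, &set_bit10);
            printf("cr0 after set:   %#lx\n", cregs[0]);
            apply(cregs, &clr_bit10);
            printf("cr0 after clear: %#lx\n", cregs[0]);
            return 0;
    }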
352#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) 550#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
353 551
354static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 552struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
553EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
554
555static void __init smp_get_save_area(int cpu, u16 address)
355{ 556{
356 if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) 557 void *lc = pcpu_devices[0].lowcore;
357 return; 558 struct save_area *save_area;
559
358 if (is_kdump_kernel()) 560 if (is_kdump_kernel())
359 return; 561 return;
562 if (!OLDMEM_BASE && (address == boot_cpu_address ||
563 ipl_info.type != IPL_TYPE_FCP_DUMP))
564 return;
360 if (cpu >= NR_CPUS) { 565 if (cpu >= NR_CPUS) {
361 pr_warning("CPU %i exceeds the maximum %i and is excluded from " 566 pr_warning("CPU %i exceeds the maximum %i and is excluded "
362 "the dump\n", cpu, NR_CPUS - 1); 567 "from the dump\n", cpu, NR_CPUS - 1);
363 return; 568 return;
364 } 569 }
365 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); 570 save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
366 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) 571 if (!save_area)
367 cpu_relax(); 572 panic("could not allocate memory for save area\n");
368 memcpy_real(zfcpdump_save_areas[cpu], 573 zfcpdump_save_areas[cpu] = save_area;
369 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 574#ifdef CONFIG_CRASH_DUMP
370 sizeof(struct save_area)); 575 if (address == boot_cpu_address) {
576 /* Copy the registers of the boot cpu. */
577 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
578 SAVE_AREA_BASE - PAGE_SIZE, 0);
579 return;
580 }
581#endif
582 /* Get the registers of a non-boot cpu. */
583 __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
584 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
371} 585}
372 586
373struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 587int smp_store_status(int cpu)
374EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
375
376#else
377
378static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
379
380#endif /* CONFIG_ZFCPDUMP */
381
382static int cpu_known(int cpu_id)
383{ 588{
384 int cpu; 589 struct pcpu *pcpu;
385 590
386 for_each_present_cpu(cpu) { 591 pcpu = pcpu_devices + cpu;
387 if (__cpu_logical_map[cpu] == cpu_id) 592 if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
388 return 1; 593 0, NULL) != sigp_order_code_accepted)
389 } 594 return -EIO;
390 return 0; 595 return 0;
391} 596}
392 597
393static int smp_rescan_cpus_sigp(cpumask_t avail) 598#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
394{
395 int cpu_id, logical_cpu;
396 599
397 logical_cpu = cpumask_first(&avail); 600static inline void smp_get_save_area(int cpu, u16 address) { }
398 if (logical_cpu >= nr_cpu_ids) 601
399 return 0; 602#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
400 for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
401 if (cpu_known(cpu_id))
402 continue;
403 __cpu_logical_map[logical_cpu] = cpu_id;
404 cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
405 if (!cpu_stopped(logical_cpu))
406 continue;
407 set_cpu_present(logical_cpu, true);
408 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
409 logical_cpu = cpumask_next(logical_cpu, &avail);
410 if (logical_cpu >= nr_cpu_ids)
411 break;
412 }
413 return 0;
414}
415 603
416static int smp_rescan_cpus_sclp(cpumask_t avail) 604static struct sclp_cpu_info *smp_get_cpu_info(void)
417{ 605{
606 static int use_sigp_detection;
418 struct sclp_cpu_info *info; 607 struct sclp_cpu_info *info;
419 int cpu_id, logical_cpu, cpu; 608 int address;
420 int rc; 609
421 610 info = kzalloc(sizeof(*info), GFP_KERNEL);
422 logical_cpu = cpumask_first(&avail); 611 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
423 if (logical_cpu >= nr_cpu_ids) 612 use_sigp_detection = 1;
424 return 0; 613 for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
425 info = kmalloc(sizeof(*info), GFP_KERNEL); 614 if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
426 if (!info) 615 sigp_not_operational)
427 return -ENOMEM; 616 continue;
428 rc = sclp_get_cpu_info(info); 617 info->cpu[info->configured].address = address;
429 if (rc) 618 info->configured++;
430 goto out; 619 }
431 for (cpu = 0; cpu < info->combined; cpu++) { 620 info->combined = info->configured;
432 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
433 continue;
434 cpu_id = info->cpu[cpu].address;
435 if (cpu_known(cpu_id))
436 continue;
437 __cpu_logical_map[logical_cpu] = cpu_id;
438 cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
439 set_cpu_present(logical_cpu, true);
440 if (cpu >= info->configured)
441 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
442 else
443 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
444 logical_cpu = cpumask_next(logical_cpu, &avail);
445 if (logical_cpu >= nr_cpu_ids)
446 break;
447 } 621 }
448out: 622 return info;
449 kfree(info);
450 return rc;
451} 623}
452 624
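When sclp is unavailable, smp_get_cpu_info falls back to brute-force probing: it issues sigp sense against every possible cpu address and records each address that responds. The loop below models that probe with a stubbed sense function; the stub and the small address range are assumptions for the example.

    /* Model of the sigp sense fallback detection loop. */
    #include <stdio.h>

    #define MAX_CPU_ADDRESS 7               /* kept small for the example */

    static int sigp_sense_stub(int address) /* 0 = operational */
    {
            return (address == 2 || address == 5) ? 0 : -1;
    }

    int main(void)
    {
            int found[MAX_CPU_ADDRESS + 1], n = 0;

            for (int address = 0; address <= MAX_CPU_ADDRESS; address++)
                    if (sigp_sense_stub(address) == 0)
                            found[n++] = address;
            for (int i = 0; i < n; i++)
                    printf("cpu address %d operational\n", found[i]);
            printf("%d cpus detected\n", n);
            return 0;
    }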
453static int __smp_rescan_cpus(void) 625static int __devinit smp_add_present_cpu(int cpu);
626
627static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
628 int sysfs_add)
454{ 629{
630 struct pcpu *pcpu;
455 cpumask_t avail; 631 cpumask_t avail;
632 int cpu, nr, i;
456 633
634 nr = 0;
457 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); 635 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
458 if (smp_use_sigp_detection) 636 cpu = cpumask_first(&avail);
459 return smp_rescan_cpus_sigp(avail); 637 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
460 else 638 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
461 return smp_rescan_cpus_sclp(avail); 639 continue;
640 if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
641 continue;
642 pcpu = pcpu_devices + cpu;
643 pcpu->address = info->cpu[i].address;
644 pcpu->state = (cpu >= info->configured) ?
645 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
646 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
647 set_cpu_present(cpu, true);
648 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
649 set_cpu_present(cpu, false);
650 else
651 nr++;
652 cpu = cpumask_next(cpu, &avail);
653 }
654 return nr;
462} 655}
463 656
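The rescan walks the cpus that are possible but not yet present; with cpumasks viewed as plain bitmaps that is an xor (equivalent to and-not here, since the present mask is a subset of the possible mask). In miniature:

    /* avail = possible ^ present picks the not-yet-present slots. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long possible = 0x0f;  /* cpus 0-3 are possible    */
            unsigned long present  = 0x03;  /* cpus 0-1 already present */
            unsigned long avail    = possible ^ present;  /* cpus 2-3   */

            printf("avail mask = %#lx\n", avail);
            return 0;
    }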
464static void __init smp_detect_cpus(void) 657static void __init smp_detect_cpus(void)
465{ 658{
466 unsigned int cpu, c_cpus, s_cpus; 659 unsigned int cpu, c_cpus, s_cpus;
467 struct sclp_cpu_info *info; 660 struct sclp_cpu_info *info;
468 u16 boot_cpu_addr, cpu_addr;
469 661
470 c_cpus = 1; 662 info = smp_get_cpu_info();
471 s_cpus = 0;
472 boot_cpu_addr = __cpu_logical_map[0];
473 info = kmalloc(sizeof(*info), GFP_KERNEL);
474 if (!info) 663 if (!info)
475 panic("smp_detect_cpus failed to allocate memory\n"); 664 panic("smp_detect_cpus failed to allocate memory\n");
476#ifdef CONFIG_CRASH_DUMP
477 if (OLDMEM_BASE && !is_kdump_kernel()) {
478 struct save_area *save_area;
479
480 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
481 if (!save_area)
482 panic("could not allocate memory for save area\n");
483 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
484 0x200, 0);
485 zfcpdump_save_areas[0] = save_area;
486 }
487#endif
488 /* Use sigp detection algorithm if sclp doesn't work. */
489 if (sclp_get_cpu_info(info)) {
490 smp_use_sigp_detection = 1;
491 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
492 if (cpu == boot_cpu_addr)
493 continue;
494 if (!raw_cpu_stopped(cpu))
495 continue;
496 smp_get_save_area(c_cpus, cpu);
497 c_cpus++;
498 }
499 goto out;
500 }
501
502 if (info->has_cpu_type) { 665 if (info->has_cpu_type) {
503 for (cpu = 0; cpu < info->combined; cpu++) { 666 for (cpu = 0; cpu < info->combined; cpu++) {
504 if (info->cpu[cpu].address == boot_cpu_addr) { 667 if (info->cpu[cpu].address != boot_cpu_address)
505 smp_cpu_type = info->cpu[cpu].type; 668 continue;
506 break; 669 /* The boot cpu dictates the cpu type. */
507 } 670 boot_cpu_type = info->cpu[cpu].type;
671 break;
508 } 672 }
509 } 673 }
510 674 c_cpus = s_cpus = 0;
511 for (cpu = 0; cpu < info->combined; cpu++) { 675 for (cpu = 0; cpu < info->combined; cpu++) {
512 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 676 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
513 continue; 677 continue;
514 cpu_addr = info->cpu[cpu].address; 678 if (cpu < info->configured) {
515 if (cpu_addr == boot_cpu_addr) 679 smp_get_save_area(c_cpus, info->cpu[cpu].address);
516 continue; 680 c_cpus++;
517 if (!raw_cpu_stopped(cpu_addr)) { 681 } else
518 s_cpus++; 682 s_cpus++;
519 continue;
520 }
521 smp_get_save_area(c_cpus, cpu_addr);
522 c_cpus++;
523 } 683 }
524out:
525 kfree(info);
526 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); 684 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
527 get_online_cpus(); 685 get_online_cpus();
528 __smp_rescan_cpus(); 686 __smp_rescan_cpus(info, 0);
529 put_online_cpus(); 687 put_online_cpus();
688 kfree(info);
530} 689}
531 690
532/* 691/*
533 * Activate a secondary processor. 692 * Activate a secondary processor.
534 */ 693 */
535int __cpuinit start_secondary(void *cpuvoid) 694static void __cpuinit smp_start_secondary(void *cpuvoid)
536{ 695{
696 S390_lowcore.last_update_clock = get_clock();
697 S390_lowcore.restart_stack = (unsigned long) restart_stack;
698 S390_lowcore.restart_fn = (unsigned long) do_restart;
699 S390_lowcore.restart_data = 0;
700 S390_lowcore.restart_source = -1UL;
701 restore_access_regs(S390_lowcore.access_regs_save_area);
702 __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
703 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
537 cpu_init(); 704 cpu_init();
538 preempt_disable(); 705 preempt_disable();
539 init_cpu_timer(); 706 init_cpu_timer();
540 init_cpu_vtimer(); 707 init_cpu_vtimer();
541 pfault_init(); 708 pfault_init();
542
543 notify_cpu_starting(smp_processor_id()); 709 notify_cpu_starting(smp_processor_id());
544 ipi_call_lock(); 710 ipi_call_lock();
545 set_cpu_online(smp_processor_id(), true); 711 set_cpu_online(smp_processor_id(), true);
546 ipi_call_unlock(); 712 ipi_call_unlock();
547 __ctl_clear_bit(0, 28); /* Disable lowcore protection */
548 S390_lowcore.restart_psw.mask =
549 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
550 S390_lowcore.restart_psw.addr =
551 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
552 __ctl_set_bit(0, 28); /* Enable lowcore protection */
553 /* 713 /*
554 * Wait until the cpu which brought this one up marked it 714 * Wait until the cpu which brought this one up marked it
555 * active before enabling interrupts. 715 * active before enabling interrupts.
@@ -559,7 +719,6 @@ int __cpuinit start_secondary(void *cpuvoid)
559 local_irq_enable(); 719 local_irq_enable();
560 /* cpu_idle will call schedule for us */ 720 /* cpu_idle will call schedule for us */
561 cpu_idle(); 721 cpu_idle();
562 return 0;
563} 722}
564 723
565struct create_idle { 724struct create_idle {
@@ -578,82 +737,20 @@ static void __cpuinit smp_fork_idle(struct work_struct *work)
578 complete(&c_idle->done); 737 complete(&c_idle->done);
579} 738}
580 739
581static int __cpuinit smp_alloc_lowcore(int cpu)
582{
583 unsigned long async_stack, panic_stack;
584 struct _lowcore *lowcore;
585
586 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
587 if (!lowcore)
588 return -ENOMEM;
589 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
590 panic_stack = __get_free_page(GFP_KERNEL);
591 if (!panic_stack || !async_stack)
592 goto out;
593 memcpy(lowcore, &S390_lowcore, 512);
594 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
595 lowcore->async_stack = async_stack + ASYNC_SIZE;
596 lowcore->panic_stack = panic_stack + PAGE_SIZE;
597 lowcore->restart_psw.mask =
598 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
599 lowcore->restart_psw.addr =
600 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
601 if (user_mode != HOME_SPACE_MODE)
602 lowcore->restart_psw.mask |= PSW_ASC_HOME;
603#ifndef CONFIG_64BIT
604 if (MACHINE_HAS_IEEE) {
605 unsigned long save_area;
606
607 save_area = get_zeroed_page(GFP_KERNEL);
608 if (!save_area)
609 goto out;
610 lowcore->extended_save_area_addr = (u32) save_area;
611 }
612#else
613 if (vdso_alloc_per_cpu(cpu, lowcore))
614 goto out;
615#endif
616 lowcore_ptr[cpu] = lowcore;
617 return 0;
618
619out:
620 free_page(panic_stack);
621 free_pages(async_stack, ASYNC_ORDER);
622 free_pages((unsigned long) lowcore, LC_ORDER);
623 return -ENOMEM;
624}
625
626static void smp_free_lowcore(int cpu)
627{
628 struct _lowcore *lowcore;
629
630 lowcore = lowcore_ptr[cpu];
631#ifndef CONFIG_64BIT
632 if (MACHINE_HAS_IEEE)
633 free_page((unsigned long) lowcore->extended_save_area_addr);
634#else
635 vdso_free_per_cpu(cpu, lowcore);
636#endif
637 free_page(lowcore->panic_stack - PAGE_SIZE);
638 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
639 free_pages((unsigned long) lowcore, LC_ORDER);
640 lowcore_ptr[cpu] = NULL;
641}
642
643/* Upping and downing of CPUs */ 740/* Upping and downing of CPUs */
644int __cpuinit __cpu_up(unsigned int cpu) 741int __cpuinit __cpu_up(unsigned int cpu)
645{ 742{
646 struct _lowcore *cpu_lowcore;
647 struct create_idle c_idle; 743 struct create_idle c_idle;
648 struct task_struct *idle; 744 struct pcpu *pcpu;
649 struct stack_frame *sf; 745 int rc;
650 u32 lowcore;
651 int ccode;
652 746
653 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 747 pcpu = pcpu_devices + cpu;
748 if (pcpu->state != CPU_STATE_CONFIGURED)
654 return -EIO; 749 return -EIO;
655 idle = current_set[cpu]; 750 if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
656 if (!idle) { 751 sigp_order_code_accepted)
752 return -EIO;
753 if (!pcpu->idle) {
657 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); 754 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
658 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); 755 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
659 c_idle.cpu = cpu; 756 c_idle.cpu = cpu;
@@ -661,68 +758,28 @@ int __cpuinit __cpu_up(unsigned int cpu)
661 wait_for_completion(&c_idle.done); 758 wait_for_completion(&c_idle.done);
662 if (IS_ERR(c_idle.idle)) 759 if (IS_ERR(c_idle.idle))
663 return PTR_ERR(c_idle.idle); 760 return PTR_ERR(c_idle.idle);
664 idle = c_idle.idle; 761 pcpu->idle = c_idle.idle;
665 current_set[cpu] = c_idle.idle;
666 } 762 }
667 init_idle(idle, cpu); 763 init_idle(pcpu->idle, cpu);
668 if (smp_alloc_lowcore(cpu)) 764 rc = pcpu_alloc_lowcore(pcpu, cpu);
669 return -ENOMEM; 765 if (rc)
670 do { 766 return rc;
671 ccode = sigp(cpu, sigp_initial_cpu_reset); 767 pcpu_prepare_secondary(pcpu, cpu);
672 if (ccode == sigp_busy) 768 pcpu_attach_task(pcpu, pcpu->idle);
673 udelay(10); 769 pcpu_start_fn(pcpu, smp_start_secondary, NULL);
674 if (ccode == sigp_not_operational)
675 goto err_out;
676 } while (ccode == sigp_busy);
677
678 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
679 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
680 udelay(10);
681
682 cpu_lowcore = lowcore_ptr[cpu];
683 cpu_lowcore->kernel_stack = (unsigned long)
684 task_stack_page(idle) + THREAD_SIZE;
685 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
686 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
687 - sizeof(struct pt_regs)
688 - sizeof(struct stack_frame));
689 memset(sf, 0, sizeof(struct stack_frame));
690 sf->gprs[9] = (unsigned long) sf;
691 cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
692 __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
693 atomic_inc(&init_mm.context.attach_count);
694 asm volatile(
695 " stam 0,15,0(%0)"
696 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
697 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
698 cpu_lowcore->current_task = (unsigned long) idle;
699 cpu_lowcore->cpu_nr = cpu;
700 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
701 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
702 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
703 memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
704 MAX_FACILITY_BIT/8);
705 eieio();
706
707 while (sigp(cpu, sigp_restart) == sigp_busy)
708 udelay(10);
709
710 while (!cpu_online(cpu)) 770 while (!cpu_online(cpu))
711 cpu_relax(); 771 cpu_relax();
712 return 0; 772 return 0;
713
714err_out:
715 smp_free_lowcore(cpu);
716 return -EIO;
717} 773}
718 774
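With the pcpu helpers in place, __cpu_up reduces to a fixed sequence: reset the target, allocate and prepare its lowcore, attach the idle task, start it at smp_start_secondary, then wait for the online bit. The outline below renders that order with stubbed steps; only the order is taken from the code, everything else is scaffolding.

    /* Outline of the reworked __cpu_up order; every step is a stub. */
    #include <stdio.h>

    static int step(const char *what)
    {
            printf("-> %s\n", what);
            return 0;                   /* 0 = success, as in the kernel */
    }

    int main(void)
    {
            if (step("sigp initial cpu reset on the target"))
                    return 1;
            if (step("pcpu_alloc_lowcore + pcpu_prepare_secondary"))
                    return 1;
            if (step("pcpu_attach_task(idle)"))
                    return 1;
            if (step("pcpu_start_fn(smp_start_secondary)"))
                    return 1;
            step("spin until cpu_online(cpu)");
            return 0;
    }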
719static int __init setup_possible_cpus(char *s) 775static int __init setup_possible_cpus(char *s)
720{ 776{
721 int pcpus, cpu; 777 int max, cpu;
722 778
723 pcpus = simple_strtoul(s, NULL, 0); 779 if (kstrtoint(s, 0, &max) < 0)
780 return 0;
724 init_cpu_possible(cpumask_of(0)); 781 init_cpu_possible(cpumask_of(0));
725 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) 782 for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
726 set_cpu_possible(cpu, true); 783 set_cpu_possible(cpu, true);
727 return 0; 784 return 0;
728} 785}
@@ -732,113 +789,67 @@ early_param("possible_cpus", setup_possible_cpus);
732 789
733int __cpu_disable(void) 790int __cpu_disable(void)
734{ 791{
735 struct ec_creg_mask_parms cr_parms; 792 unsigned long cregs[16];
736 int cpu = smp_processor_id();
737
738 set_cpu_online(cpu, false);
739 793
740 /* Disable pfault pseudo page faults on this cpu. */ 794 set_cpu_online(smp_processor_id(), false);
795 /* Disable pseudo page faults on this cpu. */
741 pfault_fini(); 796 pfault_fini();
742 797 /* Disable interrupt sources via control register. */
743 memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); 798 __ctl_store(cregs, 0, 15);
744 memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); 799 cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
745 800 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
746 /* disable all external interrupts */ 801 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
747 cr_parms.orvals[0] = 0; 802 __ctl_load(cregs, 0, 15);
748 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
749 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
750 1 << 4);
751 /* disable all I/O interrupts */
752 cr_parms.orvals[6] = 0;
753 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
754 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
755 /* disable most machine checks */
756 cr_parms.orvals[14] = 0;
757 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
758 1 << 25 | 1 << 24);
759
760 smp_ctl_bit_callback(&cr_parms);
761
762 return 0; 803 return 0;
763} 804}
764 805
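The hard-coded masks in the new __cpu_disable clear the interrupt-subclass bits directly instead of going through the parameter block: external interrupt subclasses in cr0, I/O subclasses in cr6, machine-check subclasses in cr14. A small model of the masking, with fully-set registers standing in for the stored control registers:

    /* Model of the __cpu_disable control register masking. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long cregs[16] = { [0] = -1UL, [6] = -1UL, [14] = -1UL };

            cregs[0]  &= ~0x0000ee70UL;  /* external interrupt subclasses */
            cregs[6]  &= ~0xff000000UL;  /* I/O interrupt subclasses      */
            cregs[14] &= ~0x1f000000UL;  /* machine check subclasses      */
            printf("cr0=%#lx cr6=%#lx cr14=%#lx\n",
                   cregs[0], cregs[6], cregs[14]);
            return 0;
    }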
765void __cpu_die(unsigned int cpu) 806void __cpu_die(unsigned int cpu)
766{ 807{
808 struct pcpu *pcpu;
809
767 /* Wait until target cpu is down */ 810 /* Wait until target cpu is down */
768 while (!cpu_stopped(cpu)) 811 pcpu = pcpu_devices + cpu;
812 while (!pcpu_stopped(pcpu))
769 cpu_relax(); 813 cpu_relax();
770 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) 814 pcpu_free_lowcore(pcpu);
771 udelay(10);
772 smp_free_lowcore(cpu);
773 atomic_dec(&init_mm.context.attach_count); 815 atomic_dec(&init_mm.context.attach_count);
774} 816}
775 817
776void __noreturn cpu_die(void) 818void __noreturn cpu_die(void)
777{ 819{
778 idle_task_exit(); 820 idle_task_exit();
779 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 821 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
780 cpu_relax(); 822 for (;;) ;
781 for (;;);
782} 823}
783 824
784#endif /* CONFIG_HOTPLUG_CPU */ 825#endif /* CONFIG_HOTPLUG_CPU */
785 826
786void __init smp_prepare_cpus(unsigned int max_cpus) 827void __init smp_prepare_cpus(unsigned int max_cpus)
787{ 828{
788#ifndef CONFIG_64BIT
789 unsigned long save_area = 0;
790#endif
791 unsigned long async_stack, panic_stack;
792 struct _lowcore *lowcore;
793
794 smp_detect_cpus();
795
796 /* request the 0x1201 emergency signal external interrupt */ 829 /* request the 0x1201 emergency signal external interrupt */
797 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 830 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
798 panic("Couldn't request external interrupt 0x1201"); 831 panic("Couldn't request external interrupt 0x1201");
799 /* request the 0x1202 external call external interrupt */ 832 /* request the 0x1202 external call external interrupt */
800 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) 833 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
801 panic("Couldn't request external interrupt 0x1202"); 834 panic("Couldn't request external interrupt 0x1202");
802 835 smp_detect_cpus();
803 /* Reallocate current lowcore, but keep its contents. */
804 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
805 panic_stack = __get_free_page(GFP_KERNEL);
806 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
807 BUG_ON(!lowcore || !panic_stack || !async_stack);
808#ifndef CONFIG_64BIT
809 if (MACHINE_HAS_IEEE)
810 save_area = get_zeroed_page(GFP_KERNEL);
811#endif
812 local_irq_disable();
813 local_mcck_disable();
814 lowcore_ptr[smp_processor_id()] = lowcore;
815 *lowcore = S390_lowcore;
816 lowcore->panic_stack = panic_stack + PAGE_SIZE;
817 lowcore->async_stack = async_stack + ASYNC_SIZE;
818#ifndef CONFIG_64BIT
819 if (MACHINE_HAS_IEEE)
820 lowcore->extended_save_area_addr = (u32) save_area;
821#endif
822 set_prefix((u32)(unsigned long) lowcore);
823 local_mcck_enable();
824 local_irq_enable();
825#ifdef CONFIG_64BIT
826 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
827 BUG();
828#endif
829} 836}
830 837
831void __init smp_prepare_boot_cpu(void) 838void __init smp_prepare_boot_cpu(void)
832{ 839{
833 BUG_ON(smp_processor_id() != 0); 840 struct pcpu *pcpu = pcpu_devices;
834 841
835 current_thread_info()->cpu = 0; 842 boot_cpu_address = stap();
836 set_cpu_present(0, true); 843 pcpu->idle = current;
837 set_cpu_online(0, true); 844 pcpu->state = CPU_STATE_CONFIGURED;
845 pcpu->address = boot_cpu_address;
846 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
847 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
848 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
838 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 849 S390_lowcore.percpu_offset = __per_cpu_offset[0];
839 current_set[0] = current;
840 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
841 cpu_set_polarization(0, POLARIZATION_UNKNOWN); 850 cpu_set_polarization(0, POLARIZATION_UNKNOWN);
851 set_cpu_present(0, true);
852 set_cpu_online(0, true);
842} 853}
843 854
844void __init smp_cpus_done(unsigned int max_cpus) 855void __init smp_cpus_done(unsigned int max_cpus)
@@ -848,7 +859,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
848void __init smp_setup_processor_id(void) 859void __init smp_setup_processor_id(void)
849{ 860{
850 S390_lowcore.cpu_nr = 0; 861 S390_lowcore.cpu_nr = 0;
851 __cpu_logical_map[0] = stap();
852} 862}
853 863
854/* 864/*
@@ -864,56 +874,57 @@ int setup_profiling_timer(unsigned int multiplier)
864 874
865#ifdef CONFIG_HOTPLUG_CPU 875#ifdef CONFIG_HOTPLUG_CPU
866static ssize_t cpu_configure_show(struct device *dev, 876static ssize_t cpu_configure_show(struct device *dev,
867 struct device_attribute *attr, char *buf) 877 struct device_attribute *attr, char *buf)
868{ 878{
869 ssize_t count; 879 ssize_t count;
870 880
871 mutex_lock(&smp_cpu_state_mutex); 881 mutex_lock(&smp_cpu_state_mutex);
872 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); 882 count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
873 mutex_unlock(&smp_cpu_state_mutex); 883 mutex_unlock(&smp_cpu_state_mutex);
874 return count; 884 return count;
875} 885}
876 886
877static ssize_t cpu_configure_store(struct device *dev, 887static ssize_t cpu_configure_store(struct device *dev,
878 struct device_attribute *attr, 888 struct device_attribute *attr,
879 const char *buf, size_t count) 889 const char *buf, size_t count)
880{ 890{
881 int cpu = dev->id; 891 struct pcpu *pcpu;
882 int val, rc; 892 int cpu, val, rc;
883 char delim; 893 char delim;
884 894
885 if (sscanf(buf, "%d %c", &val, &delim) != 1) 895 if (sscanf(buf, "%d %c", &val, &delim) != 1)
886 return -EINVAL; 896 return -EINVAL;
887 if (val != 0 && val != 1) 897 if (val != 0 && val != 1)
888 return -EINVAL; 898 return -EINVAL;
889
890 get_online_cpus(); 899 get_online_cpus();
891 mutex_lock(&smp_cpu_state_mutex); 900 mutex_lock(&smp_cpu_state_mutex);
892 rc = -EBUSY; 901 rc = -EBUSY;
893 /* disallow configuration changes of online cpus and cpu 0 */ 902 /* disallow configuration changes of online cpus and cpu 0 */
903 cpu = dev->id;
894 if (cpu_online(cpu) || cpu == 0) 904 if (cpu_online(cpu) || cpu == 0)
895 goto out; 905 goto out;
906 pcpu = pcpu_devices + cpu;
896 rc = 0; 907 rc = 0;
897 switch (val) { 908 switch (val) {
898 case 0: 909 case 0:
899 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { 910 if (pcpu->state != CPU_STATE_CONFIGURED)
900 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); 911 break;
901 if (!rc) { 912 rc = sclp_cpu_deconfigure(pcpu->address);
902 smp_cpu_state[cpu] = CPU_STATE_STANDBY; 913 if (rc)
903 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 914 break;
904 topology_expect_change(); 915 pcpu->state = CPU_STATE_STANDBY;
905 } 916 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
906 } 917 topology_expect_change();
907 break; 918 break;
908 case 1: 919 case 1:
909 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { 920 if (pcpu->state != CPU_STATE_STANDBY)
910 rc = sclp_cpu_configure(__cpu_logical_map[cpu]); 921 break;
911 if (!rc) { 922 rc = sclp_cpu_configure(pcpu->address);
912 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; 923 if (rc)
913 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 924 break;
914 topology_expect_change(); 925 pcpu->state = CPU_STATE_CONFIGURED;
915 } 926 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
916 } 927 topology_expect_change();
917 break; 928 break;
918 default: 929 default:
919 break; 930 break;
@@ -929,7 +940,7 @@ static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
929static ssize_t show_cpu_address(struct device *dev, 940static ssize_t show_cpu_address(struct device *dev,
930 struct device_attribute *attr, char *buf) 941 struct device_attribute *attr, char *buf)
931{ 942{
932 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); 943 return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
933} 944}
934static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); 945static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
935 946
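From user space this state machine is driven through the configure attribute: writing 0 deconfigures an offline cpu, writing 1 configures a standby one. A minimal C driver of that interface; the sysfs path follows the usual layout but is an assumption here, and the write requires root.

    /* Sketch: deconfigure cpu1 via sysfs (path assumed, needs root). */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/cpu1/configure", "w");

            if (!f) {
                    perror("open");
                    return 1;
            }
            fputs("0\n", f);            /* 0 = deconfigure, 1 = configure */
            return fclose(f) ? 1 : 0;
    }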
@@ -1021,7 +1032,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
1021 unsigned long action, void *hcpu) 1032 unsigned long action, void *hcpu)
1022{ 1033{
1023 unsigned int cpu = (unsigned int)(long)hcpu; 1034 unsigned int cpu = (unsigned int)(long)hcpu;
1024 struct cpu *c = &per_cpu(cpu_devices, cpu); 1035 struct cpu *c = &pcpu_devices[cpu].cpu;
1025 struct device *s = &c->dev; 1036 struct device *s = &c->dev;
1026 struct s390_idle_data *idle; 1037 struct s390_idle_data *idle;
1027 int err = 0; 1038 int err = 0;
@@ -1047,7 +1058,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
1047 1058
1048static int __devinit smp_add_present_cpu(int cpu) 1059static int __devinit smp_add_present_cpu(int cpu)
1049{ 1060{
1050 struct cpu *c = &per_cpu(cpu_devices, cpu); 1061 struct cpu *c = &pcpu_devices[cpu].cpu;
1051 struct device *s = &c->dev; 1062 struct device *s = &c->dev;
1052 int rc; 1063 int rc;
1053 1064
@@ -1085,29 +1096,21 @@ out:
1085 1096
1086int __ref smp_rescan_cpus(void) 1097int __ref smp_rescan_cpus(void)
1087{ 1098{
1088 cpumask_t newcpus; 1099 struct sclp_cpu_info *info;
1089 int cpu; 1100 int nr;
1090 int rc;
1091 1101
1102 info = smp_get_cpu_info();
1103 if (!info)
1104 return -ENOMEM;
1092 get_online_cpus(); 1105 get_online_cpus();
1093 mutex_lock(&smp_cpu_state_mutex); 1106 mutex_lock(&smp_cpu_state_mutex);
1094 cpumask_copy(&newcpus, cpu_present_mask); 1107 nr = __smp_rescan_cpus(info, 1);
1095 rc = __smp_rescan_cpus();
1096 if (rc)
1097 goto out;
1098 cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
1099 for_each_cpu(cpu, &newcpus) {
1100 rc = smp_add_present_cpu(cpu);
1101 if (rc)
1102 set_cpu_present(cpu, false);
1103 }
1104 rc = 0;
1105out:
1106 mutex_unlock(&smp_cpu_state_mutex); 1108 mutex_unlock(&smp_cpu_state_mutex);
1107 put_online_cpus(); 1109 put_online_cpus();
1108 if (!cpumask_empty(&newcpus)) 1110 kfree(info);
1111 if (nr)
1109 topology_schedule_update(); 1112 topology_schedule_update();
1110 return rc; 1113 return 0;
1111} 1114}
1112 1115
1113static ssize_t __ref rescan_store(struct device *dev, 1116static ssize_t __ref rescan_store(struct device *dev,
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
deleted file mode 100644
index bfe070bc765..00000000000
--- a/arch/s390/kernel/switch_cpu.S
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * 31-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <linux/linkage.h>
9#include <asm/asm-offsets.h>
10#include <asm/ptrace.h>
11
12# smp_switch_to_cpu switches to destination cpu and executes the passed function
13# Parameter: %r2 - function to call
14# %r3 - function parameter
 15# %r4 - stack pointer
16# %r5 - current cpu
17# %r6 - destination cpu
18
19 .section .text
20ENTRY(smp_switch_to_cpu)
21 stm %r6,%r15,__SF_GPRS(%r15)
22 lr %r1,%r15
23 ahi %r15,-STACK_FRAME_OVERHEAD
24 st %r1,__SF_BACKCHAIN(%r15)
25 basr %r13,0
260: la %r1,.gprregs_addr-0b(%r13)
27 l %r1,0(%r1)
28 stm %r0,%r15,0(%r1)
291: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
30 brc 2,1b /* busy, try again */
312: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
32 brc 2,2b /* busy, try again */
333: j 3b
34
35ENTRY(smp_restart_cpu)
36 basr %r13,0
370: la %r1,.gprregs_addr-0b(%r13)
38 l %r1,0(%r1)
39 lm %r0,%r15,0(%r1)
401: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
41 brc 10,1b /* busy, accepted (status 0), running */
42 tmll %r0,0x40 /* Test if calling CPU is stopped */
43 jz 1b
44 ltr %r4,%r4 /* New stack ? */
45 jz 1f
46 lr %r15,%r4
471: lr %r14,%r2 /* r14: Function to call */
48 lr %r2,%r3 /* r2 : Parameter for function*/
49 basr %r14,%r14 /* Call function */
50
51.gprregs_addr:
52 .long .gprregs
53
54 .section .data,"aw",@progbits
55.gprregs:
56 .rept 16
57 .long 0
58 .endr
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
deleted file mode 100644
index fcc42d799e4..00000000000
--- a/arch/s390/kernel/switch_cpu64.S
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * 64-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <linux/linkage.h>
9#include <asm/asm-offsets.h>
10#include <asm/ptrace.h>
11
12# smp_switch_to_cpu switches to destination cpu and executes the passed function
13# Parameter: %r2 - function to call
14# %r3 - function parameter
 15# %r4 - stack pointer
16# %r5 - current cpu
17# %r6 - destination cpu
18
19 .section .text
20ENTRY(smp_switch_to_cpu)
21 stmg %r6,%r15,__SF_GPRS(%r15)
22 lgr %r1,%r15
23 aghi %r15,-STACK_FRAME_OVERHEAD
24 stg %r1,__SF_BACKCHAIN(%r15)
25 larl %r1,.gprregs
26 stmg %r0,%r15,0(%r1)
271: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
28 brc 2,1b /* busy, try again */
292: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
30 brc 2,2b /* busy, try again */
313: j 3b
32
33ENTRY(smp_restart_cpu)
34 larl %r1,.gprregs
35 lmg %r0,%r15,0(%r1)
361: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
37 brc 10,1b /* busy, accepted (status 0), running */
38 tmll %r0,0x40 /* Test if calling CPU is stopped */
39 jz 1b
40 ltgr %r4,%r4 /* New stack ? */
41 jz 1f
42 lgr %r15,%r4
431: lgr %r14,%r2 /* r14: Function to call */
44 lgr %r2,%r3 /* r2 : Parameter for function*/
45 basr %r14,%r14 /* Call function */
46
47 .section .data,"aw",@progbits
48.gprregs:
49 .rept 16
50 .quad 0
51 .endr
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 2ef39d1519a..ad3c79eceed 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -179,9 +179,9 @@ pgm_check_entry:
179 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 179 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
180 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 180 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
1813: 1813:
182 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET 182 sigp %r9,%r1,11 /* sigp initial cpu reset */
183 brc 8,4f /* accepted */ 183 brc 8,4f /* accepted */
184 brc 2,3b /* busy, try again */ 184 brc 2,3b /* busy, try again */
185 185
186 /* Suspend CPU not available -> panic */ 186 /* Suspend CPU not available -> panic */
187 larl %r15,init_thread_union 187 larl %r15,init_thread_union
@@ -196,10 +196,10 @@ pgm_check_entry:
196 lpsw 0(%r3) 196 lpsw 0(%r3)
1974: 1974:
198 /* Switch to suspend CPU */ 198 /* Switch to suspend CPU */
199 sigp %r9,%r1,__SIGP_RESTART /* start suspend CPU */ 199 sigp %r9,%r1,6 /* sigp restart to suspend CPU */
200 brc 2,4b /* busy, try again */ 200 brc 2,4b /* busy, try again */
2015: 2015:
202 sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */ 202 sigp %r9,%r2,5 /* sigp stop to current resume CPU */
203 brc 2,5b /* busy, try again */ 203 brc 2,5b /* busy, try again */
2046: j 6b 2046: j 6b
205 205
@@ -207,7 +207,7 @@ restart_suspend:
207 larl %r1,.Lresume_cpu 207 larl %r1,.Lresume_cpu
208 llgh %r2,0(%r1) 208 llgh %r2,0(%r1)
2097: 2097:
210 sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */ 210 sigp %r9,%r2,1 /* sigp sense, wait for resume CPU */
211 brc 8,7b /* accepted, status 0, still running */ 211 brc 8,7b /* accepted, status 0, still running */
212 brc 2,7b /* busy, try again */ 212 brc 2,7b /* busy, try again */
213 tmll %r9,0x40 /* Test if resume CPU is stopped */ 213 tmll %r9,0x40 /* Test if resume CPU is stopped */
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 7370a41948c..4f8dc942257 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -79,12 +79,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
79 cpu < TOPOLOGY_CPU_BITS; 79 cpu < TOPOLOGY_CPU_BITS;
80 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1)) 80 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
81 { 81 {
82 unsigned int rcpu, lcpu; 82 unsigned int rcpu;
83 int lcpu;
83 84
84 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; 85 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
85 for_each_present_cpu(lcpu) { 86 lcpu = smp_find_processor_id(rcpu);
86 if (cpu_logical_map(lcpu) != rcpu) 87 if (lcpu >= 0) {
87 continue;
88 cpumask_set_cpu(lcpu, &book->mask); 88 cpumask_set_cpu(lcpu, &book->mask);
89 cpu_book_id[lcpu] = book->id; 89 cpu_book_id[lcpu] = book->id;
90 cpumask_set_cpu(lcpu, &core->mask); 90 cpumask_set_cpu(lcpu, &core->mask);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index d73630b4fe1..e704a9965f9 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -89,18 +89,11 @@ static void vdso_init_data(struct vdso_data *vd)
89 89
90#ifdef CONFIG_64BIT 90#ifdef CONFIG_64BIT
91/* 91/*
92 * Setup per cpu vdso data page.
93 */
94static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
95{
96}
97
98/*
99 * Allocate/free per cpu vdso data. 92 * Allocate/free per cpu vdso data.
100 */ 93 */
101#define SEGMENT_ORDER 2 94#define SEGMENT_ORDER 2
102 95
103int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) 96int vdso_alloc_per_cpu(struct _lowcore *lowcore)
104{ 97{
105 unsigned long segment_table, page_table, page_frame; 98 unsigned long segment_table, page_table, page_frame;
106 u32 *psal, *aste; 99 u32 *psal, *aste;
@@ -139,7 +132,6 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
139 aste[4] = (u32)(addr_t) psal; 132 aste[4] = (u32)(addr_t) psal;
140 lowcore->vdso_per_cpu_data = page_frame; 133 lowcore->vdso_per_cpu_data = page_frame;
141 134
142 vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
143 return 0; 135 return 0;
144 136
145out: 137out:
@@ -149,7 +141,7 @@ out:
149 return -ENOMEM; 141 return -ENOMEM;
150} 142}
151 143
152void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) 144void vdso_free_per_cpu(struct _lowcore *lowcore)
153{ 145{
154 unsigned long segment_table, page_table, page_frame; 146 unsigned long segment_table, page_table, page_frame;
155 u32 *psal, *aste; 147 u32 *psal, *aste;
@@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
168 free_pages(segment_table, SEGMENT_ORDER); 160 free_pages(segment_table, SEGMENT_ORDER);
169} 161}
170 162
171static void __vdso_init_cr5(void *dummy) 163static void vdso_init_cr5(void)
172{ 164{
173 unsigned long cr5; 165 unsigned long cr5;
174 166
167 if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
168 return;
175 cr5 = offsetof(struct _lowcore, paste); 169 cr5 = offsetof(struct _lowcore, paste);
176 __ctl_load(cr5, 5, 5); 170 __ctl_load(cr5, 5, 5);
177} 171}
178
179static void vdso_init_cr5(void)
180{
181 if (user_mode != HOME_SPACE_MODE && vdso_enabled)
182 on_each_cpu(__vdso_init_cr5, NULL, 1);
183}
184#endif /* CONFIG_64BIT */ 172#endif /* CONFIG_64BIT */
185 173
186/* 174/*
@@ -322,10 +310,8 @@ static int __init vdso_init(void)
322 } 310 }
323 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); 311 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
324 vdso64_pagelist[vdso64_pages] = NULL; 312 vdso64_pagelist[vdso64_pages] = NULL;
325#ifndef CONFIG_SMP 313 if (vdso_alloc_per_cpu(&S390_lowcore))
326 if (vdso_alloc_per_cpu(0, &S390_lowcore))
327 BUG(); 314 BUG();
328#endif
329 vdso_init_cr5(); 315 vdso_init_cr5();
330#endif /* CONFIG_64BIT */ 316#endif /* CONFIG_64BIT */
331 317
@@ -335,7 +321,7 @@ static int __init vdso_init(void)
335 321
336 return 0; 322 return 0;
337} 323}
338arch_initcall(vdso_init); 324early_initcall(vdso_init);
339 325
340int in_gate_area_no_mm(unsigned long addr) 326int in_gate_area_no_mm(unsigned long addr)
341{ 327{
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index bb48977f546..7bacee9a546 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -570,6 +570,9 @@ void init_cpu_vtimer(void)
570 570
571 /* enable cpu timer interrupts */ 571 /* enable cpu timer interrupts */
572 __ctl_set_bit(0,10); 572 __ctl_set_bit(0,10);
573
574 /* set initial cpu timer */
575 set_vtimer(0x7fffffffffffffffULL);
573} 576}
574 577
575static int __cpuinit s390_nohz_notify(struct notifier_block *self, 578static int __cpuinit s390_nohz_notify(struct notifier_block *self,
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 91754ffb920..093eb694d9c 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/smp.h>
13#include <asm/io.h> 14#include <asm/io.h>
14 15
15int spin_retry = 1000; 16int spin_retry = 1000;
@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str)
24} 25}
25__setup("spin_retry=", spin_retry_setup); 26__setup("spin_retry=", spin_retry_setup);
26 27
27static inline void _raw_yield(void)
28{
29 if (MACHINE_HAS_DIAG44)
30 asm volatile("diag 0,0,0x44");
31}
32
33static inline void _raw_yield_cpu(int cpu)
34{
35 if (MACHINE_HAS_DIAG9C)
36 asm volatile("diag %0,0,0x9c"
37 : : "d" (cpu_logical_map(cpu)));
38 else
39 _raw_yield();
40}
41
42void arch_spin_lock_wait(arch_spinlock_t *lp) 28void arch_spin_lock_wait(arch_spinlock_t *lp)
43{ 29{
44 int count = spin_retry; 30 int count = spin_retry;
@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
60 } 46 }
61 owner = lp->owner_cpu; 47 owner = lp->owner_cpu;
62 if (owner) 48 if (owner)
63 _raw_yield_cpu(~owner); 49 smp_yield_cpu(~owner);
64 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 50 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
65 return; 51 return;
66 } 52 }
@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
91 } 77 }
92 owner = lp->owner_cpu; 78 owner = lp->owner_cpu;
93 if (owner) 79 if (owner)
94 _raw_yield_cpu(~owner); 80 smp_yield_cpu(~owner);
95 local_irq_disable(); 81 local_irq_disable();
96 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 82 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
97 return; 83 return;
@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
121 if (cpu != 0) { 107 if (cpu != 0) {
122 if (MACHINE_IS_VM || MACHINE_IS_KVM || 108 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
123 !smp_vcpu_scheduled(~cpu)) 109 !smp_vcpu_scheduled(~cpu))
124 _raw_yield_cpu(~cpu); 110 smp_yield_cpu(~cpu);
125 } 111 }
126} 112}
127EXPORT_SYMBOL(arch_spin_relax); 113EXPORT_SYMBOL(arch_spin_relax);
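The ~owner in these call sites is deliberate: the lock word stores the bitwise complement of the owner's cpu number, so that 0 can still mean "unlocked" even though cpu 0 exists; complementing again recovers the cpu id passed to smp_yield_cpu. In miniature:

    /* The spinlock owner encoding: store ~cpu, recover with ~lockval. */
    #include <stdio.h>

    int main(void)
    {
            int cpu = 0;
            unsigned int lockval = ~cpu;    /* nonzero even for cpu 0 */

            printf("stored %#x, owner cpu %d\n", lockval, (int) ~lockval);
            return 0;
    }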
@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
133 119
134 while (1) { 120 while (1) {
135 if (count-- <= 0) { 121 if (count-- <= 0) {
136 _raw_yield(); 122 smp_yield();
137 count = spin_retry; 123 count = spin_retry;
138 } 124 }
139 if (!arch_read_can_lock(rw)) 125 if (!arch_read_can_lock(rw))
@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
153 local_irq_restore(flags); 139 local_irq_restore(flags);
154 while (1) { 140 while (1) {
155 if (count-- <= 0) { 141 if (count-- <= 0) {
156 _raw_yield(); 142 smp_yield();
157 count = spin_retry; 143 count = spin_retry;
158 } 144 }
159 if (!arch_read_can_lock(rw)) 145 if (!arch_read_can_lock(rw))
@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
188 174
189 while (1) { 175 while (1) {
190 if (count-- <= 0) { 176 if (count-- <= 0) {
191 _raw_yield(); 177 smp_yield();
192 count = spin_retry; 178 count = spin_retry;
193 } 179 }
194 if (!arch_write_can_lock(rw)) 180 if (!arch_write_can_lock(rw))
@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
206 local_irq_restore(flags); 192 local_irq_restore(flags);
207 while (1) { 193 while (1) {
208 if (count-- <= 0) { 194 if (count-- <= 0) {
209 _raw_yield(); 195 smp_yield();
210 count = spin_retry; 196 count = spin_retry;
211 } 197 }
212 if (!arch_write_can_lock(rw)) 198 if (!arch_write_can_lock(rw))
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 87fc0ac11e6..69df137310b 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -15,7 +15,6 @@
15#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <linux/atomic.h> 16#include <linux/atomic.h>
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/sigp.h>
19#include <asm/smp.h> 18#include <asm/smp.h>
20 19
21#include "sclp.h" 20#include "sclp.h"
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1b6d9247fdc..3303d66b279 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -21,7 +21,6 @@
21#include <asm/ipl.h> 21#include <asm/ipl.h>
22#include <asm/sclp.h> 22#include <asm/sclp.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/sigp.h>
25#include <asm/uaccess.h> 24#include <asm/uaccess.h>
26#include <asm/debug.h> 25#include <asm/debug.h>
27#include <asm/processor.h> 26#include <asm/processor.h>