author     Michael Ellerman <michael@ellerman.id.au>    2006-12-05 01:52:38 -0500
committer  Paul Mackerras <paulus@samba.org>            2006-12-07 23:55:55 -0500
commit     413f7c405a342b0b9370ea7a652b9f0270183bf3
tree       bee6cab5f98d14e55abcb2979b1bcd496f16c0ee
parent     04da6af960194ecdee4c29cd3f86e766903418ca
[POWERPC] Move the rest of the hotplug cpu code into platforms/pseries/hotplug-cpu.c
Move the rest of the hotplug cpu code from platforms/pseries/smp.c into
platforms/pseries/hotplug-cpu.c.
Wire up the smp_ops callbacks and the notifier in the hotplug cpu initcall,
rather than in smp_init_pseries(). No change in behaviour.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
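
For reference, after this patch all of the hotplug wiring happens in the initcall in hotplug-cpu.c. The function below is reproduced from the hotplug-cpu.c hunk further down (one orienting comment added; everything else is verbatim from the diff):

static int __init pseries_cpu_hotplug_init(void)
{
	rtas_stop_self_args.token = rtas_token("stop-self");

	ppc_md.cpu_die = pSeries_mach_cpu_die;

	/* smp_ops hooks that were previously set up in smp_init_pseries() */
	smp_ops->cpu_disable = pSeries_cpu_disable;
	smp_ops->cpu_die = pSeries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		pSeries_reconfig_notifier_register(&pSeries_smp_nb);

	return 0;
}
arch_initcall(pseries_cpu_hotplug_init);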
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 201
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c         | 200
2 files changed, 199 insertions, 202 deletions
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 9e9b6b159fab..12864d75126d 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -1,11 +1,14 @@
 /*
  * pseries CPU Hotplug infrastructure.
  *
- * Split out from arch/powerpc/platforms/pseries/setup.c and
- * arch/powerpc/kernel/rtas.c
+ * Split out from arch/powerpc/platforms/pseries/setup.c
+ * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
  *
  * Peter Bergner, IBM March 2001.
  * Copyright (C) 2001 IBM.
+ * Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
+ * Plus various changes from other IBM teams...
  *
  * Copyright (C) 2006 Michael Ellerman, IBM Corporation
  *
@@ -61,12 +64,206 @@ static void pSeries_mach_cpu_die(void)
 	for(;;);
 }
 
+/* Get state of physical CPU.
+ * Return codes:
+ *	0 - The processor is in the RTAS stopped state
+ *	1 - stop-self is in progress
+ *	2 - The processor is not in the RTAS stopped state
+ *	-1 - Hardware Error
+ *	-2 - Hardware Busy, Try again later.
+ */
+static int query_cpu_stopped(unsigned int pcpu)
+{
+	int cpu_status;
+	int status, qcss_tok;
+
+	qcss_tok = rtas_token("query-cpu-stopped-state");
+	if (qcss_tok == RTAS_UNKNOWN_SERVICE)
+		return -1;
+	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+	if (status != 0) {
+		printk(KERN_ERR
+		       "RTAS query-cpu-stopped-state failed: %i\n", status);
+		return status;
+	}
+
+	return cpu_status;
+}
+
+static int pSeries_cpu_disable(void)
+{
+	int cpu = smp_processor_id();
+
+	cpu_clear(cpu, cpu_online_map);
+	vdso_data->processorCount--;
+
+	/*fix boot_cpuid here*/
+	if (cpu == boot_cpuid)
+		boot_cpuid = any_online_cpu(cpu_online_map);
+
+	/* FIXME: abstract this to not be platform specific later on */
+	xics_migrate_irqs_away();
+	return 0;
+}
+
+static void pSeries_cpu_die(unsigned int cpu)
+{
+	int tries;
+	int cpu_status;
+	unsigned int pcpu = get_hard_smp_processor_id(cpu);
+
+	for (tries = 0; tries < 25; tries++) {
+		cpu_status = query_cpu_stopped(pcpu);
+		if (cpu_status == 0 || cpu_status == -1)
+			break;
+		msleep(200);
+	}
+	if (cpu_status != 0) {
+		printk("Querying DEAD? cpu %i (%i) shows %i\n",
+		       cpu, pcpu, cpu_status);
+	}
+
+	/* Isolation and deallocation are definatly done by
+	 * drslot_chrp_cpu. If they were not they would be
+	 * done here. Change isolate state to Isolate and
+	 * change allocation-state to Unusable.
+	 */
+	paca[cpu].cpu_start = 0;
+}
+
+/*
+ * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
+ * here is that a cpu device node may represent up to two logical cpus
+ * in the SMT case. We must honor the assumption in other code that
+ * the logical ids for sibling SMT threads x and y are adjacent, such
+ * that x^1 == y and y^1 == x.
+ */
+static int pSeries_add_processor(struct device_node *np)
+{
+	unsigned int cpu;
+	cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+	int err = -ENOSPC, len, nthreads, i;
+	const u32 *intserv;
+
+	intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+	if (!intserv)
+		return 0;
+
+	nthreads = len / sizeof(u32);
+	for (i = 0; i < nthreads; i++)
+		cpu_set(i, tmp);
+
+	lock_cpu_hotplug();
+
+	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+
+	/* Get a bitmap of unoccupied slots. */
+	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
+	if (cpus_empty(candidate_map)) {
+		/* If we get here, it most likely means that NR_CPUS is
+		 * less than the partition's max processors setting.
+		 */
+		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
+		       " supports %d logical cpus.\n", np->full_name,
+		       cpus_weight(cpu_possible_map));
+		goto out_unlock;
+	}
+
+	while (!cpus_empty(tmp))
+		if (cpus_subset(tmp, candidate_map))
+			/* Found a range where we can insert the new cpu(s) */
+			break;
+		else
+			cpus_shift_left(tmp, tmp, nthreads);
+
+	if (cpus_empty(tmp)) {
+		printk(KERN_ERR "Unable to find space in cpu_present_map for"
+		       " processor %s with %d thread(s)\n", np->name,
+		       nthreads);
+		goto out_unlock;
+	}
+
+	for_each_cpu_mask(cpu, tmp) {
+		BUG_ON(cpu_isset(cpu, cpu_present_map));
+		cpu_set(cpu, cpu_present_map);
+		set_hard_smp_processor_id(cpu, *intserv++);
+	}
+	err = 0;
+out_unlock:
+	unlock_cpu_hotplug();
+	return err;
+}
+
+/*
+ * Update the present map for a cpu node which is going away, and set
+ * the hard id in the paca(s) to -1 to be consistent with boot time
+ * convention for non-present cpus.
+ */
+static void pSeries_remove_processor(struct device_node *np)
+{
+	unsigned int cpu;
+	int len, nthreads, i;
+	const u32 *intserv;
+
+	intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+	if (!intserv)
+		return;
+
+	nthreads = len / sizeof(u32);
+
+	lock_cpu_hotplug();
+	for (i = 0; i < nthreads; i++) {
+		for_each_present_cpu(cpu) {
+			if (get_hard_smp_processor_id(cpu) != intserv[i])
+				continue;
+			BUG_ON(cpu_online(cpu));
+			cpu_clear(cpu, cpu_present_map);
+			set_hard_smp_processor_id(cpu, -1);
+			break;
+		}
+		if (cpu == NR_CPUS)
+			printk(KERN_WARNING "Could not find cpu to remove "
+			       "with physical id 0x%x\n", intserv[i]);
+	}
+	unlock_cpu_hotplug();
+}
+
+static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
+{
+	int err = NOTIFY_OK;
+
+	switch (action) {
+	case PSERIES_RECONFIG_ADD:
+		if (pSeries_add_processor(node))
+			err = NOTIFY_BAD;
+		break;
+	case PSERIES_RECONFIG_REMOVE:
+		pSeries_remove_processor(node);
+		break;
+	default:
+		err = NOTIFY_DONE;
+		break;
+	}
+	return err;
+}
+
+static struct notifier_block pSeries_smp_nb = {
+	.notifier_call = pSeries_smp_notifier,
+};
+
 static int __init pseries_cpu_hotplug_init(void)
 {
 	rtas_stop_self_args.token = rtas_token("stop-self");
 
 	ppc_md.cpu_die = pSeries_mach_cpu_die;
 
+	smp_ops->cpu_disable = pSeries_cpu_disable;
+	smp_ops->cpu_die = pSeries_cpu_die;
+
+	/* Processors can be added/removed only on LPAR */
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		pSeries_reconfig_notifier_register(&pSeries_smp_nb);
+
 	return 0;
 }
 arch_initcall(pseries_cpu_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c6624b8a0e77..4408518eaebe 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -64,197 +64,6 @@ static cpumask_t of_spin_map;
 
 extern void generic_secondary_smp_init(unsigned long);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Get state of physical CPU.
- * Return codes:
- *	0 - The processor is in the RTAS stopped state
- *	1 - stop-self is in progress
- *	2 - The processor is not in the RTAS stopped state
- *	-1 - Hardware Error
- *	-2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
-	int cpu_status;
-	int status, qcss_tok;
-
-	qcss_tok = rtas_token("query-cpu-stopped-state");
-	if (qcss_tok == RTAS_UNKNOWN_SERVICE)
-		return -1;
-	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
-	if (status != 0) {
-		printk(KERN_ERR
-		       "RTAS query-cpu-stopped-state failed: %i\n", status);
-		return status;
-	}
-
-	return cpu_status;
-}
-
-static int pSeries_cpu_disable(void)
-{
-	int cpu = smp_processor_id();
-
-	cpu_clear(cpu, cpu_online_map);
-	vdso_data->processorCount--;
-
-	/*fix boot_cpuid here*/
-	if (cpu == boot_cpuid)
-		boot_cpuid = any_online_cpu(cpu_online_map);
-
-	/* FIXME: abstract this to not be platform specific later on */
-	xics_migrate_irqs_away();
-	return 0;
-}
-
-static void pSeries_cpu_die(unsigned int cpu)
-{
-	int tries;
-	int cpu_status;
-	unsigned int pcpu = get_hard_smp_processor_id(cpu);
-
-	for (tries = 0; tries < 25; tries++) {
-		cpu_status = query_cpu_stopped(pcpu);
-		if (cpu_status == 0 || cpu_status == -1)
-			break;
-		msleep(200);
-	}
-	if (cpu_status != 0) {
-		printk("Querying DEAD? cpu %i (%i) shows %i\n",
-		       cpu, pcpu, cpu_status);
-	}
-
-	/* Isolation and deallocation are definatly done by
-	 * drslot_chrp_cpu. If they were not they would be
-	 * done here. Change isolate state to Isolate and
-	 * change allocation-state to Unusable.
-	 */
-	paca[cpu].cpu_start = 0;
-}
-
-/*
- * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
- * here is that a cpu device node may represent up to two logical cpus
- * in the SMT case. We must honor the assumption in other code that
- * the logical ids for sibling SMT threads x and y are adjacent, such
- * that x^1 == y and y^1 == x.
- */
-static int pSeries_add_processor(struct device_node *np)
-{
-	unsigned int cpu;
-	cpumask_t candidate_map, tmp = CPU_MASK_NONE;
-	int err = -ENOSPC, len, nthreads, i;
-	const u32 *intserv;
-
-	intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
-	if (!intserv)
-		return 0;
-
-	nthreads = len / sizeof(u32);
-	for (i = 0; i < nthreads; i++)
-		cpu_set(i, tmp);
-
-	lock_cpu_hotplug();
-
-	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
-
-	/* Get a bitmap of unoccupied slots. */
-	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-	if (cpus_empty(candidate_map)) {
-		/* If we get here, it most likely means that NR_CPUS is
-		 * less than the partition's max processors setting.
-		 */
-		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
-		       " supports %d logical cpus.\n", np->full_name,
-		       cpus_weight(cpu_possible_map));
-		goto out_unlock;
-	}
-
-	while (!cpus_empty(tmp))
-		if (cpus_subset(tmp, candidate_map))
-			/* Found a range where we can insert the new cpu(s) */
-			break;
-		else
-			cpus_shift_left(tmp, tmp, nthreads);
-
-	if (cpus_empty(tmp)) {
-		printk(KERN_ERR "Unable to find space in cpu_present_map for"
-		       " processor %s with %d thread(s)\n", np->name,
-		       nthreads);
-		goto out_unlock;
-	}
-
-	for_each_cpu_mask(cpu, tmp) {
-		BUG_ON(cpu_isset(cpu, cpu_present_map));
-		cpu_set(cpu, cpu_present_map);
-		set_hard_smp_processor_id(cpu, *intserv++);
-	}
-	err = 0;
-out_unlock:
-	unlock_cpu_hotplug();
-	return err;
-}
-
-/*
- * Update the present map for a cpu node which is going away, and set
- * the hard id in the paca(s) to -1 to be consistent with boot time
- * convention for non-present cpus.
- */
-static void pSeries_remove_processor(struct device_node *np)
-{
-	unsigned int cpu;
-	int len, nthreads, i;
-	const u32 *intserv;
-
-	intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
-	if (!intserv)
-		return;
-
-	nthreads = len / sizeof(u32);
-
-	lock_cpu_hotplug();
-	for (i = 0; i < nthreads; i++) {
-		for_each_present_cpu(cpu) {
-			if (get_hard_smp_processor_id(cpu) != intserv[i])
-				continue;
-			BUG_ON(cpu_online(cpu));
-			cpu_clear(cpu, cpu_present_map);
-			set_hard_smp_processor_id(cpu, -1);
-			break;
-		}
-		if (cpu == NR_CPUS)
-			printk(KERN_WARNING "Could not find cpu to remove "
-			       "with physical id 0x%x\n", intserv[i]);
-	}
-	unlock_cpu_hotplug();
-}
-
-static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
-{
-	int err = NOTIFY_OK;
-
-	switch (action) {
-	case PSERIES_RECONFIG_ADD:
-		if (pSeries_add_processor(node))
-			err = NOTIFY_BAD;
-		break;
-	case PSERIES_RECONFIG_REMOVE:
-		pSeries_remove_processor(node);
-		break;
-	default:
-		err = NOTIFY_DONE;
-		break;
-	}
-	return err;
-}
-
-static struct notifier_block pSeries_smp_nb = {
-	.notifier_call = pSeries_smp_notifier,
-};
-
-#endif /* CONFIG_HOTPLUG_CPU */
-
 /**
  * smp_startup_cpu() - start the given cpu
  *
@@ -422,15 +231,6 @@ static void __init smp_init_pseries(void)
 
 	DBG(" -> smp_init_pSeries()\n");
 
-#ifdef CONFIG_HOTPLUG_CPU
-	smp_ops->cpu_disable = pSeries_cpu_disable;
-	smp_ops->cpu_die = pSeries_cpu_die;
-
-	/* Processors can be added/removed only on LPAR */
-	if (firmware_has_feature(FW_FEATURE_LPAR))
-		pSeries_reconfig_notifier_register(&pSeries_smp_nb);
-#endif
-
 	/* Mark threads which are still spinning in hold loops. */
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) {