diff options
author | David S. Miller <davem@sunset.davemloft.net> | 2007-07-16 06:49:40 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-07-16 07:05:32 -0400 |
commit | e0204409df29fe1b7d18f81dfc3ae6f9d90e7a63 (patch) | |
tree | 66f670c0f182d02185f2f3ea6bb7bb97c165ff3b /arch/sparc64/kernel/smp.c | |
parent | f3c681c028846bd5d39f563909409832a295ca69 (diff) |
[SPARC64]: dr-cpu unconfigure support.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r-- | arch/sparc64/kernel/smp.c | 118 |
1 file changed, 106 insertions, 12 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index e038ae65cb62..b448d33321c6 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/prom.h> | 44 | #include <asm/prom.h> |
45 | #include <asm/mdesc.h> | 45 | #include <asm/mdesc.h> |
46 | #include <asm/ldc.h> | 46 | #include <asm/ldc.h> |
47 | #include <asm/hypervisor.h> | ||
47 | 48 | ||
48 | extern void calibrate_delay(void); | 49 | extern void calibrate_delay(void); |
49 | 50 | ||
@@ -62,7 +63,6 @@ EXPORT_SYMBOL(cpu_sibling_map); | |||
62 | EXPORT_SYMBOL(cpu_core_map); | 63 | EXPORT_SYMBOL(cpu_core_map); |
63 | 64 | ||
64 | static cpumask_t smp_commenced_mask; | 65 | static cpumask_t smp_commenced_mask; |
65 | static cpumask_t cpu_callout_map; | ||
66 | 66 | ||
67 | void smp_info(struct seq_file *m) | 67 | void smp_info(struct seq_file *m) |
68 | { | 68 | { |
@@ -83,6 +83,8 @@ void smp_bogo(struct seq_file *m) | |||
83 | i, cpu_data(i).clock_tick); | 83 | i, cpu_data(i).clock_tick); |
84 | } | 84 | } |
85 | 85 | ||
86 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); | ||
87 | |||
86 | extern void setup_sparc64_timer(void); | 88 | extern void setup_sparc64_timer(void); |
87 | 89 | ||
88 | static volatile unsigned long callin_flag = 0; | 90 | static volatile unsigned long callin_flag = 0; |
@@ -121,7 +123,9 @@ void __devinit smp_callin(void) | |||
121 | while (!cpu_isset(cpuid, smp_commenced_mask)) | 123 | while (!cpu_isset(cpuid, smp_commenced_mask)) |
122 | rmb(); | 124 | rmb(); |
123 | 125 | ||
126 | spin_lock(&call_lock); | ||
124 | cpu_set(cpuid, cpu_online_map); | 127 | cpu_set(cpuid, cpu_online_map); |
128 | spin_unlock(&call_lock); | ||
125 | 129 | ||
126 | /* idle thread is expected to have preempt disabled */ | 130 | /* idle thread is expected to have preempt disabled */ |
127 | preempt_disable(); | 131 | preempt_disable(); |
@@ -324,6 +328,9 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg) | |||
324 | hv_err = sun4v_cpu_start(cpu, trampoline_ra, | 328 | hv_err = sun4v_cpu_start(cpu, trampoline_ra, |
325 | kimage_addr_to_ra(&sparc64_ttable_tl0), | 329 | kimage_addr_to_ra(&sparc64_ttable_tl0), |
326 | __pa(hdesc)); | 330 | __pa(hdesc)); |
331 | if (hv_err) | ||
332 | printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() " | ||
333 | "gives error %lu\n", hv_err); | ||
327 | } | 334 | } |
328 | #endif | 335 | #endif |
329 | 336 | ||
@@ -350,7 +357,6 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) | |||
350 | p = fork_idle(cpu); | 357 | p = fork_idle(cpu); |
351 | callin_flag = 0; | 358 | callin_flag = 0; |
352 | cpu_new_thread = task_thread_info(p); | 359 | cpu_new_thread = task_thread_info(p); |
353 | cpu_set(cpu, cpu_callout_map); | ||
354 | 360 | ||
355 | if (tlb_type == hypervisor) { | 361 | if (tlb_type == hypervisor) { |
356 | /* Alloc the mondo queues, cpu will load them. */ | 362 | /* Alloc the mondo queues, cpu will load them. */ |
@@ -379,7 +385,6 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) | |||
379 | ret = 0; | 385 | ret = 0; |
380 | } else { | 386 | } else { |
381 | printk("Processor %d is stuck.\n", cpu); | 387 | printk("Processor %d is stuck.\n", cpu); |
382 | cpu_clear(cpu, cpu_callout_map); | ||
383 | ret = -ENODEV; | 388 | ret = -ENODEV; |
384 | } | 389 | } |
385 | cpu_new_thread = NULL; | 390 | cpu_new_thread = NULL; |
@@ -791,7 +796,6 @@ struct call_data_struct { | |||
791 | int wait; | 796 | int wait; |
792 | }; | 797 | }; |
793 | 798 | ||
794 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); | ||
795 | static struct call_data_struct *call_data; | 799 | static struct call_data_struct *call_data; |
796 | 800 | ||
797 | extern unsigned long xcall_call_function; | 801 | extern unsigned long xcall_call_function; |
@@ -1241,7 +1245,7 @@ void __devinit smp_fill_in_sib_core_maps(void) | |||
1241 | { | 1245 | { |
1242 | unsigned int i; | 1246 | unsigned int i; |
1243 | 1247 | ||
1244 | for_each_possible_cpu(i) { | 1248 | for_each_present_cpu(i) { |
1245 | unsigned int j; | 1249 | unsigned int j; |
1246 | 1250 | ||
1247 | cpus_clear(cpu_core_map[i]); | 1251 | cpus_clear(cpu_core_map[i]); |
@@ -1250,14 +1254,14 @@ void __devinit smp_fill_in_sib_core_maps(void) | |||
1250 | continue; | 1254 | continue; |
1251 | } | 1255 | } |
1252 | 1256 | ||
1253 | for_each_possible_cpu(j) { | 1257 | for_each_present_cpu(j) { |
1254 | if (cpu_data(i).core_id == | 1258 | if (cpu_data(i).core_id == |
1255 | cpu_data(j).core_id) | 1259 | cpu_data(j).core_id) |
1256 | cpu_set(j, cpu_core_map[i]); | 1260 | cpu_set(j, cpu_core_map[i]); |
1257 | } | 1261 | } |
1258 | } | 1262 | } |
1259 | 1263 | ||
1260 | for_each_possible_cpu(i) { | 1264 | for_each_present_cpu(i) { |
1261 | unsigned int j; | 1265 | unsigned int j; |
1262 | 1266 | ||
1263 | cpus_clear(cpu_sibling_map[i]); | 1267 | cpus_clear(cpu_sibling_map[i]); |
@@ -1266,7 +1270,7 @@ void __devinit smp_fill_in_sib_core_maps(void) | |||
1266 | continue; | 1270 | continue; |
1267 | } | 1271 | } |
1268 | 1272 | ||
1269 | for_each_possible_cpu(j) { | 1273 | for_each_present_cpu(j) { |
1270 | if (cpu_data(i).proc_id == | 1274 | if (cpu_data(i).proc_id == |
1271 | cpu_data(j).proc_id) | 1275 | cpu_data(j).proc_id) |
1272 | cpu_set(j, cpu_sibling_map[i]); | 1276 | cpu_set(j, cpu_sibling_map[i]); |
@@ -1296,16 +1300,106 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
1296 | } | 1300 | } |
1297 | 1301 | ||
1298 | #ifdef CONFIG_HOTPLUG_CPU | 1302 | #ifdef CONFIG_HOTPLUG_CPU |
1303 | void cpu_play_dead(void) | ||
1304 | { | ||
1305 | int cpu = smp_processor_id(); | ||
1306 | unsigned long pstate; | ||
1307 | |||
1308 | idle_task_exit(); | ||
1309 | |||
1310 | if (tlb_type == hypervisor) { | ||
1311 | struct trap_per_cpu *tb = &trap_block[cpu]; | ||
1312 | |||
1313 | sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, | ||
1314 | tb->cpu_mondo_pa, 0); | ||
1315 | sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO, | ||
1316 | tb->dev_mondo_pa, 0); | ||
1317 | sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR, | ||
1318 | tb->resum_mondo_pa, 0); | ||
1319 | sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR, | ||
1320 | tb->nonresum_mondo_pa, 0); | ||
1321 | } | ||
1322 | |||
1323 | cpu_clear(cpu, smp_commenced_mask); | ||
1324 | membar_safe("#Sync"); | ||
1325 | |||
1326 | local_irq_disable(); | ||
1327 | |||
1328 | __asm__ __volatile__( | ||
1329 | "rdpr %%pstate, %0\n\t" | ||
1330 | "wrpr %0, %1, %%pstate" | ||
1331 | : "=r" (pstate) | ||
1332 | : "i" (PSTATE_IE)); | ||
1333 | |||
1334 | while (1) | ||
1335 | barrier(); | ||
1336 | } | ||
1337 | |||
1299 | int __cpu_disable(void) | 1338 | int __cpu_disable(void) |
1300 | { | 1339 | { |
1301 | printk(KERN_ERR "SMP: __cpu_disable() on cpu %d\n", | 1340 | int cpu = smp_processor_id(); |
1302 | smp_processor_id()); | 1341 | cpuinfo_sparc *c; |
1303 | return -ENODEV; | 1342 | int i; |
1343 | |||
1344 | for_each_cpu_mask(i, cpu_core_map[cpu]) | ||
1345 | cpu_clear(cpu, cpu_core_map[i]); | ||
1346 | cpus_clear(cpu_core_map[cpu]); | ||
1347 | |||
1348 | for_each_cpu_mask(i, cpu_sibling_map[cpu]) | ||
1349 | cpu_clear(cpu, cpu_sibling_map[i]); | ||
1350 | cpus_clear(cpu_sibling_map[cpu]); | ||
1351 | |||
1352 | c = &cpu_data(cpu); | ||
1353 | |||
1354 | c->core_id = 0; | ||
1355 | c->proc_id = -1; | ||
1356 | |||
1357 | spin_lock(&call_lock); | ||
1358 | cpu_clear(cpu, cpu_online_map); | ||
1359 | spin_unlock(&call_lock); | ||
1360 | |||
1361 | smp_wmb(); | ||
1362 | |||
1363 | /* Make sure no interrupts point to this cpu. */ | ||
1364 | fixup_irqs(); | ||
1365 | |||
1366 | local_irq_enable(); | ||
1367 | mdelay(1); | ||
1368 | local_irq_disable(); | ||
1369 | |||
1370 | return 0; | ||
1304 | } | 1371 | } |
1305 | 1372 | ||
1306 | void __cpu_die(unsigned int cpu) | 1373 | void __cpu_die(unsigned int cpu) |
1307 | { | 1374 | { |
1308 | printk(KERN_ERR "SMP: __cpu_die(%u)\n", cpu); | 1375 | int i; |
1376 | |||
1377 | for (i = 0; i < 100; i++) { | ||
1378 | smp_rmb(); | ||
1379 | if (!cpu_isset(cpu, smp_commenced_mask)) | ||
1380 | break; | ||
1381 | msleep(100); | ||
1382 | } | ||
1383 | if (cpu_isset(cpu, smp_commenced_mask)) { | ||
1384 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | ||
1385 | } else { | ||
1386 | #if defined(CONFIG_SUN_LDOMS) | ||
1387 | unsigned long hv_err; | ||
1388 | int limit = 100; | ||
1389 | |||
1390 | do { | ||
1391 | hv_err = sun4v_cpu_stop(cpu); | ||
1392 | if (hv_err == HV_EOK) { | ||
1393 | cpu_clear(cpu, cpu_present_map); | ||
1394 | break; | ||
1395 | } | ||
1396 | } while (--limit > 0); | ||
1397 | if (limit <= 0) { | ||
1398 | printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n", | ||
1399 | hv_err); | ||
1400 | } | ||
1401 | #endif | ||
1402 | } | ||
1309 | } | 1403 | } |
1310 | #endif | 1404 | #endif |
1311 | 1405 | ||