author     David S. Miller <davem@sunset.davemloft.net>   2007-07-16 06:49:40 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-07-16 07:05:32 -0400
commit     e0204409df29fe1b7d18f81dfc3ae6f9d90e7a63
tree       66f670c0f182d02185f2f3ea6bb7bb97c165ff3b
parent     f3c681c028846bd5d39f563909409832a295ca69
[SPARC64]: dr-cpu unconfigure support.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/ds.c      |  25
-rw-r--r--  arch/sparc64/kernel/irq.c     |  20
-rw-r--r--  arch/sparc64/kernel/process.c |  21
-rw-r--r--  arch/sparc64/kernel/smp.c     | 118
4 files changed, 165 insertions(+), 19 deletions(-)
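
How the four files cooperate on the removal path (a reading aid inferred from the diffs below; the dispatch through cpu_down() is the generic hotplug core, which this patch does not touch):

/*
 * dr_cpu_unconfigure()            ds.c      walks the request cpumask
 *   -> cpu_down(cpu)              generic hotplug core
 *        -> __cpu_disable()       smp.c     drops the cpu from the
 *                                           online/core/sibling maps,
 *                                           fixup_irqs() retargets IRQs
 *        -> idle loop sees cpu_is_offline() process.c
 *             -> cpu_play_dead()  smp.c     unconfigures the sun4v
 *                                           mondo queues, clears its
 *                                           bit in smp_commenced_mask,
 *                                           spins with IRQs disabled
 *        -> __cpu_die()           smp.c     polls smp_commenced_mask,
 *                                           then sun4v_cpu_stop(cpu)
 */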
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index b6331718ee0c..1c587107cef0 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -20,6 +20,7 @@
 #include <asm/power.h>
 #include <asm/mdesc.h>
 #include <asm/head.h>
+#include <asm/irq.h>
 
 #define DRV_MODULE_NAME		"ds"
 #define PFX			DRV_MODULE_NAME ": "
@@ -559,6 +560,9 @@ static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num,
 
 	kfree(resp);
 
+	/* Redistribute IRQs, taking into account the new cpus. */
+	fixup_irqs();
+
 	return 0;
 }
 
@@ -566,7 +570,8 @@ static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
 			      cpumask_t *mask)
 {
 	struct ds_data *resp;
-	int resp_len, ncpus;
+	int resp_len, ncpus, cpu;
+	unsigned long flags;
 
 	ncpus = cpus_weight(*mask);
 	resp_len = dr_cpu_size_response(ncpus);
@@ -578,9 +583,25 @@ static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
 			     resp_len, ncpus, mask,
 			     DR_CPU_STAT_UNCONFIGURED);
 
+	for_each_cpu_mask(cpu, *mask) {
+		int err;
+
+		printk(KERN_INFO PFX "CPU[%d]: Shutting down cpu %d...\n",
+		       smp_processor_id(), cpu);
+		err = cpu_down(cpu);
+		if (err)
+			dr_cpu_mark(resp, cpu, ncpus,
+				    DR_CPU_RES_FAILURE,
+				    DR_CPU_STAT_CONFIGURED);
+	}
+
+	spin_lock_irqsave(&ds_lock, flags);
+	ds_send(ds_info->lp, resp, resp_len);
+	spin_unlock_irqrestore(&ds_lock, flags);
+
 	kfree(resp);
 
-	return -EOPNOTSUPP;
+	return 0;
 }
 
 static void process_dr_cpu_list(struct ds_cap_state *cp)
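
A note on the unconfigure handler above (an observation on the hunk, not patch text):

/*
 * A failed cpu_down() does not abort the request: that cpu's slot in
 * the response is rewritten by dr_cpu_mark() as DR_CPU_RES_FAILURE /
 * DR_CPU_STAT_CONFIGURED, the loop continues with the next cpu, the
 * response is still ds_send()'d under ds_lock, and the handler now
 * returns 0 (replacing the old -EOPNOTSUPP stub).  Per-cpu status
 * travels in the response payload, not in the return value.
 */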
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index a1c916f35baa..8cb3358674f5 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -803,6 +803,26 @@ void handler_irq(int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void fixup_irqs(void)
+{
+	unsigned int irq;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&irq_desc[irq].lock, flags);
+		if (irq_desc[irq].action &&
+		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
+			if (irq_desc[irq].chip->set_affinity)
+				irq_desc[irq].chip->set_affinity(irq,
+					irq_desc[irq].affinity);
+		}
+		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+	}
+}
+#endif
+
 struct sun5_timer {
 	u64	count0;
 	u64	limit0;
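
How fixup_irqs() is used, taken from the two call sites this same patch adds:

/*
 * fixup_irqs() re-invokes each irq_chip's ->set_affinity() with the
 * descriptor's current affinity, skipping unused descriptors
 * (!action) and IRQ_PER_CPU interrupts.  Its callers in this patch:
 *
 *   dr_cpu_configure()  (ds.c)  - after new cpus come online, so
 *                                 interrupts can spread onto them;
 *   __cpu_disable()     (smp.c) - after the dying cpu has left
 *                                 cpu_online_map, so no interrupt is
 *                                 left pointing at it.
 */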
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index f5f97e2c669c..93557507ec9f 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/compat.h>
 #include <linux/tick.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
@@ -49,7 +50,7 @@
 
 /* #define VERBOSE_SHOWREGS */
 
-static void sparc64_yield(void)
+static void sparc64_yield(int cpu)
 {
 	if (tlb_type != hypervisor)
 		return;
@@ -57,7 +58,7 @@ static void sparc64_yield(void)
 	clear_thread_flag(TIF_POLLING_NRFLAG);
 	smp_mb__after_clear_bit();
 
-	while (!need_resched()) {
+	while (!need_resched() && !cpu_is_offline(cpu)) {
 		unsigned long pstate;
 
 		/* Disable interrupts. */
@@ -68,7 +69,7 @@ static void sparc64_yield(void)
 		: "=&r" (pstate)
 		: "i" (PSTATE_IE));
 
-		if (!need_resched())
+		if (!need_resched() && !cpu_is_offline(cpu))
 			sun4v_cpu_yield();
 
 		/* Re-enable interrupts. */
@@ -86,15 +87,25 @@ static void sparc64_yield(void)
 /* The idle loop on sparc64. */
 void cpu_idle(void)
 {
+	int cpu = smp_processor_id();
+
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while(1) {
 		tick_nohz_stop_sched_tick();
-		while (!need_resched())
-			sparc64_yield();
+
+		while (!need_resched() && !cpu_is_offline(cpu))
+			sparc64_yield(cpu);
+
 		tick_nohz_restart_sched_tick();
 
 		preempt_enable_no_resched();
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if (cpu_is_offline(cpu))
+			cpu_play_dead();
+#endif
+
 		schedule();
 		preempt_disable();
 	}
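
Why the offline test appears twice in sparc64_yield() (my reading; the commit message does not spell it out): it mirrors the pre-existing need_resched() double-check.

/*
 * The first !cpu_is_offline(cpu) test runs with interrupts enabled as
 * a cheap early-out; the second runs only after PSTATE_IE has been
 * cleared, so the decision to enter sun4v_cpu_yield() cannot race
 * with this cpu being marked offline.  Without it, a freshly offlined
 * cpu could presumably park in the hypervisor instead of falling
 * through to cpu_play_dead() in cpu_idle().
 */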
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index e038ae65cb62..b448d33321c6 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -44,6 +44,7 @@
 #include <asm/prom.h>
 #include <asm/mdesc.h>
 #include <asm/ldc.h>
+#include <asm/hypervisor.h>
 
 extern void calibrate_delay(void);
 
@@ -62,7 +63,6 @@ EXPORT_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t smp_commenced_mask;
-static cpumask_t cpu_callout_map;
 
 void smp_info(struct seq_file *m)
 {
@@ -83,6 +83,8 @@ void smp_bogo(struct seq_file *m)
 		   i, cpu_data(i).clock_tick);
 }
 
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
+
 extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
@@ -121,7 +123,9 @@ void __devinit smp_callin(void)
 	while (!cpu_isset(cpuid, smp_commenced_mask))
 		rmb();
 
+	spin_lock(&call_lock);
 	cpu_set(cpuid, cpu_online_map);
+	spin_unlock(&call_lock);
 
 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();
@@ -324,6 +328,9 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
 				 kimage_addr_to_ra(&sparc64_ttable_tl0),
 				 __pa(hdesc));
+	if (hv_err)
+		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
+		       "gives error %lu\n", hv_err);
 }
 #endif
 
@@ -350,7 +357,6 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 	p = fork_idle(cpu);
 	callin_flag = 0;
 	cpu_new_thread = task_thread_info(p);
-	cpu_set(cpu, cpu_callout_map);
 
 	if (tlb_type == hypervisor) {
 		/* Alloc the mondo queues, cpu will load them. */
@@ -379,7 +385,6 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 		ret = 0;
 	} else {
 		printk("Processor %d is stuck.\n", cpu);
-		cpu_clear(cpu, cpu_callout_map);
 		ret = -ENODEV;
 	}
 	cpu_new_thread = NULL;
@@ -791,7 +796,6 @@ struct call_data_struct {
 	int wait;
 };
 
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
@@ -1241,7 +1245,7 @@ void __devinit smp_fill_in_sib_core_maps(void)
 {
 	unsigned int i;
 
-	for_each_possible_cpu(i) {
+	for_each_present_cpu(i) {
 		unsigned int j;
 
 		cpus_clear(cpu_core_map[i]);
@@ -1250,14 +1254,14 @@ void __devinit smp_fill_in_sib_core_maps(void)
 			continue;
 		}
 
-		for_each_possible_cpu(j) {
+		for_each_present_cpu(j) {
 			if (cpu_data(i).core_id ==
 			    cpu_data(j).core_id)
 				cpu_set(j, cpu_core_map[i]);
 		}
 	}
 
-	for_each_possible_cpu(i) {
+	for_each_present_cpu(i) {
 		unsigned int j;
 
 		cpus_clear(cpu_sibling_map[i]);
@@ -1266,7 +1270,7 @@ void __devinit smp_fill_in_sib_core_maps(void)
 			continue;
 		}
 
-		for_each_possible_cpu(j) {
+		for_each_present_cpu(j) {
 			if (cpu_data(i).proc_id ==
 			    cpu_data(j).proc_id)
 				cpu_set(j, cpu_sibling_map[i]);
@@ -1296,16 +1300,106 @@ int __cpuinit __cpu_up(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+void cpu_play_dead(void)
+{
+	int cpu = smp_processor_id();
+	unsigned long pstate;
+
+	idle_task_exit();
+
+	if (tlb_type == hypervisor) {
+		struct trap_per_cpu *tb = &trap_block[cpu];
+
+		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
+				tb->cpu_mondo_pa, 0);
+		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
+				tb->dev_mondo_pa, 0);
+		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
+				tb->resum_mondo_pa, 0);
+		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
+				tb->nonresum_mondo_pa, 0);
+	}
+
+	cpu_clear(cpu, smp_commenced_mask);
+	membar_safe("#Sync");
+
+	local_irq_disable();
+
+	__asm__ __volatile__(
+		"rdpr	%%pstate, %0\n\t"
+		"wrpr	%0, %1, %%pstate"
+		: "=r" (pstate)
+		: "i" (PSTATE_IE));
+
+	while (1)
+		barrier();
+}
+
 int __cpu_disable(void)
 {
-	printk(KERN_ERR "SMP: __cpu_disable() on cpu %d\n",
-	       smp_processor_id());
-	return -ENODEV;
+	int cpu = smp_processor_id();
+	cpuinfo_sparc *c;
+	int i;
+
+	for_each_cpu_mask(i, cpu_core_map[cpu])
+		cpu_clear(cpu, cpu_core_map[i]);
+	cpus_clear(cpu_core_map[cpu]);
+
+	for_each_cpu_mask(i, cpu_sibling_map[cpu])
+		cpu_clear(cpu, cpu_sibling_map[i]);
+	cpus_clear(cpu_sibling_map[cpu]);
+
+	c = &cpu_data(cpu);
+
+	c->core_id = 0;
+	c->proc_id = -1;
+
+	spin_lock(&call_lock);
+	cpu_clear(cpu, cpu_online_map);
+	spin_unlock(&call_lock);
+
+	smp_wmb();
+
+	/* Make sure no interrupts point to this cpu. */
+	fixup_irqs();
+
+	local_irq_enable();
+	mdelay(1);
+	local_irq_disable();
+
+	return 0;
 }
 
 void __cpu_die(unsigned int cpu)
 {
-	printk(KERN_ERR "SMP: __cpu_die(%u)\n", cpu);
+	int i;
+
+	for (i = 0; i < 100; i++) {
+		smp_rmb();
+		if (!cpu_isset(cpu, smp_commenced_mask))
+			break;
+		msleep(100);
+	}
+	if (cpu_isset(cpu, smp_commenced_mask)) {
+		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+	} else {
+#if defined(CONFIG_SUN_LDOMS)
+		unsigned long hv_err;
+		int limit = 100;
+
+		do {
+			hv_err = sun4v_cpu_stop(cpu);
+			if (hv_err == HV_EOK) {
+				cpu_clear(cpu, cpu_present_map);
+				break;
+			}
+		} while (--limit > 0);
+		if (limit <= 0) {
+			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
+			       hv_err);
+		}
+#endif
+	}
 }
 #endif
 
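
A timing note on __cpu_die() above (arithmetic from the loop bounds; the grace period is not stated in the commit):

/*
 * __cpu_die() polls smp_commenced_mask up to 100 times with
 * msleep(100) between polls -- roughly a 10 second window for the
 * dying cpu to reach cpu_play_dead() and clear its bit.  Only under
 * CONFIG_SUN_LDOMS is the cpu then physically stopped with
 * sun4v_cpu_stop() and dropped from cpu_present_map, presumably so a
 * later dr-cpu configure request can bring it back.
 */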