author     Alex Nixon <alex.nixon@citrix.com>    2008-08-22 06:52:15 -0400
committer  Ingo Molnar <mingo@elte.hu>           2008-08-25 05:25:14 -0400
commit     d68d82afd4c88e25763b23cd9cd4974573a3706f
tree       42a3fb93a5cef70db7ad01fda1ed0dc68dbe6110
parent     8227dce7dc2cfdcc28ee0eadfb482a7ee77fba03
xen: implement CPU hotplugging
Note the changes from 2.6.18-xen CPU hotplugging:

A vcpu_down request from the remote admin via Xenbus both hot-unplugs the
CPU and disables it, by removing it from the cpu_present map and removing
its entry in /sys.

A vcpu_up request from the remote admin only re-enables the CPU; it does
not immediately bring the CPU up. A udev event is emitted, which the user
can catch to automatically re-up CPUs as they become available, or to
implement a more complex policy.

Signed-off-by: Alex Nixon <alex.nixon@citrix.com>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/xen/smp.c         60
-rw-r--r--  arch/x86/xen/spinlock.c     5
-rw-r--r--  arch/x86/xen/time.c         8
-rw-r--r--  arch/x86/xen/xen-ops.h      6
-rw-r--r--  drivers/xen/Makefile        2
-rw-r--r--  drivers/xen/cpu_hotplug.c  90
-rw-r--r--  drivers/xen/events.c        4
7 files changed, 162 insertions, 13 deletions
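The guest-side code added below only re-enables or disables a vCPU when the toolstack writes the cpu/N/availability xenstore node; after a vcpu_up the CPU stays offline and a udev event is emitted for userspace to act on. As a purely illustrative sketch (not part of this patch, and with a hypothetical program name and invocation), a userspace helper run from such a udev rule could bring the re-enabled CPU online through the standard sysfs CPU-hotplug attribute:

/* xen-cpu-online.c -- hypothetical example, not part of this patch.
 * Bring a re-enabled CPU online by writing "1" to its sysfs "online"
 * attribute, e.g. when invoked from a udev RUN rule with the CPU number.
 */
#include <stdio.h>
#include <stdlib.h>

static int online_cpu(unsigned int cpu)
{
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%u/online", cpu);
        f = fopen(path, "w");
        if (!f)
                return -1;      /* e.g. CPU0 usually has no "online" file */
        if (fputs("1", f) == EOF) {
                fclose(f);
                return -1;
        }
        return fclose(f) ? -1 : 0;
}

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <cpu-number>\n", argv[0]);
                return 1;
        }
        return online_cpu(strtoul(argv[1], NULL, 0)) ? 1 : 0;
}

Offlining is symmetric (writing "0"), which is what the in-kernel watcher below effectively triggers via cpu_down() when the xenstore state reads "offline".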
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index baca7f2fbd8a..be5cbb2b7c60 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -11,8 +11,6 @@
  * useful topology information for the kernel to make use of. As a
  * result, all CPUs are treated as if they're single-core and
  * single-threaded.
- *
- * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
 #include <linux/err.h>
@@ -61,11 +59,12 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static __cpuinit void cpu_bringup(void)
 {
 	int cpu = smp_processor_id();
 
 	cpu_init();
+	touch_softlockup_watchdog();
 	preempt_disable();
 
 	xen_enable_sysenter();
@@ -86,6 +85,11 @@ static __cpuinit void cpu_bringup_and_idle(void)
 	local_irq_enable();
 
 	wmb();			/* make sure everything is out */
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+	cpu_bringup();
 	cpu_idle();
 }
 
@@ -209,8 +213,6 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
 		cpu_set(cpu, cpu_present_map);
 	}
-
-	//init_xenbus_allowed_cpumask();
 }
 
 static __cpuinit int
@@ -278,12 +280,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#if 0
-	rc = cpu_up_check(cpu);
-	if (rc)
-		return rc;
-#endif
-
 #ifdef CONFIG_X86_64
 	/* Allocate node local memory for AP pdas */
 	WARN_ON(cpu == 0);
@@ -336,6 +332,42 @@ static void xen_smp_cpus_done(unsigned int max_cpus)
 {
 }
 
+int xen_cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	if (cpu == 0)
+		return -EBUSY;
+
+	cpu_disable_common();
+
+	load_cr3(swapper_pg_dir);
+	return 0;
+}
+
+void xen_cpu_die(unsigned int cpu)
+{
+	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+		current->state = TASK_UNINTERRUPTIBLE;
+		schedule_timeout(HZ/10);
+	}
+	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	xen_uninit_lock_cpu(cpu);
+	xen_teardown_timer(cpu);
+
+	if (num_online_cpus() == 1)
+		alternatives_smp_switch(0);
+}
+
+void xen_play_dead(void)
+{
+	play_dead_common();
+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+	cpu_bringup();
+}
+
 static void stop_self(void *v)
 {
 	int cpu = smp_processor_id();
@@ -419,9 +451,13 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
-	.cpu_up = xen_cpu_up,
 	.smp_cpus_done = xen_smp_cpus_done,
 
+	.cpu_up = xen_cpu_up,
+	.cpu_die = xen_cpu_die,
+	.cpu_disable = xen_cpu_disable,
+	.play_dead = xen_play_dead,
+
 	.smp_send_stop = xen_smp_send_stop,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d072823bc06d..dd71e3a021cd 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -357,6 +357,11 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
 }
 
+void xen_uninit_lock_cpu(int cpu)
+{
+	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+}
+
 void __init xen_init_spinlocks(void)
 {
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 20182d9072c4..004ba86326ae 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -450,6 +450,14 @@ void xen_setup_timer(int cpu)
 	setup_runstate_info(cpu);
 }
 
+void xen_teardown_timer(int cpu)
+{
+	struct clock_event_device *evt;
+	BUG_ON(cpu == 0);
+	evt = &per_cpu(xen_clock_events, cpu);
+	unbind_from_irqhandler(evt->irq, NULL);
+}
+
 void xen_setup_cpu_clockevents(void)
 {
 	BUG_ON(preemptible());
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1e8bfdaa20d3..8dbd97fd7f18 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -34,6 +34,7 @@ void __init xen_build_dynamic_phys_to_machine(void);
 
 void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
+void xen_teardown_timer(int cpu);
 cycle_t xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 unsigned long xen_tsc_khz(void);
@@ -50,11 +51,16 @@ void xen_mark_init_mm_pinned(void);
 
 void __init xen_setup_vcpu_info_placement(void);
 
+void xen_play_dead(void);
+void xen_cpu_die(unsigned int cpu);
+int xen_cpu_disable(void);
+
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
 void __init xen_init_spinlocks(void);
 __cpuinit void xen_init_lock_cpu(int cpu);
+void xen_uninit_lock_cpu(int cpu);
 
 extern cpumask_t xen_cpu_initialized_map;
 #else
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 363286c54290..f62d8df27696 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
-obj-y	+= grant-table.o features.o events.o manage.o
+obj-y	+= grant-table.o features.o events.o manage.o cpu_hotplug.o
 obj-y	+= xenbus/
 obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
new file mode 100644
index 000000000000..1bc003536cdb
--- /dev/null
+++ b/drivers/xen/cpu_hotplug.c
@@ -0,0 +1,90 @@
+#include <linux/notifier.h>
+
+#include <xen/xenbus.h>
+
+#include <asm-x86/xen/hypervisor.h>
+#include <asm/cpu.h>
+
+static void enable_hotplug_cpu(int cpu)
+{
+	if (!cpu_present(cpu))
+		arch_register_cpu(cpu);
+
+	cpu_set(cpu, cpu_present_map);
+}
+
+static void disable_hotplug_cpu(int cpu)
+{
+	if (cpu_present(cpu))
+		arch_unregister_cpu(cpu);
+
+	cpu_clear(cpu, cpu_present_map);
+}
+
+static void vcpu_hotplug(unsigned int cpu)
+{
+	int err;
+	char dir[32], state[32];
+
+	if (!cpu_possible(cpu))
+		return;
+
+	sprintf(dir, "cpu/%u", cpu);
+	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+	if (err != 1) {
+		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+		return;
+	}
+
+	if (strcmp(state, "online") == 0) {
+		enable_hotplug_cpu(cpu);
+	} else if (strcmp(state, "offline") == 0) {
+		(void)cpu_down(cpu);
+		disable_hotplug_cpu(cpu);
+	} else {
+		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
+		       state, cpu);
+	}
+}
+
+static void handle_vcpu_hotplug_event(struct xenbus_watch *watch,
+					const char **vec, unsigned int len)
+{
+	unsigned int cpu;
+	char *cpustr;
+	const char *node = vec[XS_WATCH_PATH];
+
+	cpustr = strstr(node, "cpu/");
+	if (cpustr != NULL) {
+		sscanf(cpustr, "cpu/%u", &cpu);
+		vcpu_hotplug(cpu);
+	}
+}
+
+static int setup_cpu_watcher(struct notifier_block *notifier,
+			      unsigned long event, void *data)
+{
+	static struct xenbus_watch cpu_watch = {
+		.node = "cpu",
+		.callback = handle_vcpu_hotplug_event};
+
+	(void)register_xenbus_watch(&cpu_watch);
+
+	return NOTIFY_DONE;
+}
+
+static int __init setup_vcpu_hotplug_event(void)
+{
+	static struct notifier_block xsn_cpu = {
+		.notifier_call = setup_cpu_watcher };
+
+	if (!is_running_on_xen())
+		return -ENODEV;
+
+	register_xenstore_notifier(&xsn_cpu);
+
+	return 0;
+}
+
+arch_initcall(setup_vcpu_hotplug_event);
+
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index b6c2b8f16bee..c3290bc186a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -360,6 +360,10 @@ static void unbind_from_irq(unsigned int irq)
 		per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 			[index_from_irq(irq)] = -1;
 		break;
+	case IRQT_IPI:
+		per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
+			[index_from_irq(irq)] = -1;
+		break;
 	default:
 		break;
 	}