Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/xen/smp.c       60
-rw-r--r--  arch/x86/xen/spinlock.c   5
-rw-r--r--  arch/x86/xen/time.c       8
-rw-r--r--  arch/x86/xen/xen-ops.h    6
4 files changed, 67 insertions(+), 12 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index baca7f2fbd8a..be5cbb2b7c60 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -11,8 +11,6 @@
  * useful topology information for the kernel to make use of. As a
  * result, all CPUs are treated as if they're single-core and
  * single-threaded.
- *
- * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
 #include <linux/err.h>
@@ -61,11 +59,12 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static __cpuinit void cpu_bringup(void)
 {
 	int cpu = smp_processor_id();
 
 	cpu_init();
+	touch_softlockup_watchdog();
 	preempt_disable();
 
 	xen_enable_sysenter();
@@ -86,6 +85,11 @@ static __cpuinit void cpu_bringup_and_idle(void)
 	local_irq_enable();
 
 	wmb();			/* make sure everything is out */
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+	cpu_bringup();
 	cpu_idle();
 }
 
@@ -209,8 +213,6 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
 		cpu_set(cpu, cpu_present_map);
 	}
-
-	//init_xenbus_allowed_cpumask();
 }
 
 static __cpuinit int
@@ -278,12 +280,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#if 0
-	rc = cpu_up_check(cpu);
-	if (rc)
-		return rc;
-#endif
-
 #ifdef CONFIG_X86_64
 	/* Allocate node local memory for AP pdas */
 	WARN_ON(cpu == 0);
@@ -336,6 +332,42 @@ static void xen_smp_cpus_done(unsigned int max_cpus)
 {
 }
 
+int xen_cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	if (cpu == 0)
+		return -EBUSY;
+
+	cpu_disable_common();
+
+	load_cr3(swapper_pg_dir);
+	return 0;
+}
+
+void xen_cpu_die(unsigned int cpu)
+{
+	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+		current->state = TASK_UNINTERRUPTIBLE;
+		schedule_timeout(HZ/10);
+	}
+	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	xen_uninit_lock_cpu(cpu);
+	xen_teardown_timer(cpu);
+
+	if (num_online_cpus() == 1)
+		alternatives_smp_switch(0);
+}
+
+void xen_play_dead(void)
+{
+	play_dead_common();
+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+	cpu_bringup();
+}
+
 static void stop_self(void *v)
 {
 	int cpu = smp_processor_id();
@@ -419,9 +451,13 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
-	.cpu_up = xen_cpu_up,
 	.smp_cpus_done = xen_smp_cpus_done,
 
+	.cpu_up = xen_cpu_up,
+	.cpu_die = xen_cpu_die,
+	.cpu_disable = xen_cpu_disable,
+	.play_dead = xen_play_dead,
+
 	.smp_send_stop = xen_smp_send_stop,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
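The hooks above slot into the generic x86 CPU hotplug path: cpu_down() ends up in xen_cpu_disable() on the departing CPU (which refuses CPU 0), xen_cpu_die() then polls VCPUOP_is_up from a surviving CPU and unbinds the per-CPU IRQs, spinlock kicker and timer once the vCPU is gone, and the departing CPU itself finishes in xen_play_dead(), where it issues VCPUOP_down and re-runs cpu_bringup() when it is later plugged back in. A minimal user-space sketch for exercising that round trip from inside a guest; the sysfs path and the choice of CPU 1 are illustrative assumptions, not part of this patch:

/* toggle_cpu1.c - offline vCPU 1, then bring it back online.
 * Illustrative only; assumes CONFIG_HOTPLUG_CPU and the standard
 * /sys/devices/system/cpu/cpuN/online attribute inside the guest.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int set_online(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
		return -1;
	}
	if (fputs(val, f) == EOF) {
		fprintf(stderr, "%s: write failed\n", path);
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	const char *cpu1 = "/sys/devices/system/cpu/cpu1/online";

	/* "0" goes through cpu_down(), i.e. xen_cpu_disable()/xen_cpu_die(),
	 * with xen_play_dead() running on the vCPU itself. */
	if (set_online(cpu1, "0"))
		return 1;

	/* "1" re-plugs the vCPU via xen_cpu_up(); the patch expects it to
	 * resume after VCPUOP_down and run cpu_bringup() again. */
	return set_online(cpu1, "1") ? 1 : 0;
}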
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d072823bc06d..dd71e3a021cd 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -357,6 +357,11 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
 }
 
+void xen_uninit_lock_cpu(int cpu)
+{
+	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+}
+
 void __init xen_init_spinlocks(void)
 {
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 20182d9072c4..004ba86326ae 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -450,6 +450,14 @@ void xen_setup_timer(int cpu)
 	setup_runstate_info(cpu);
 }
 
+void xen_teardown_timer(int cpu)
+{
+	struct clock_event_device *evt;
+	BUG_ON(cpu == 0);
+	evt = &per_cpu(xen_clock_events, cpu);
+	unbind_from_irqhandler(evt->irq, NULL);
+}
+
 void xen_setup_cpu_clockevents(void)
 {
 	BUG_ON(preemptible());
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1e8bfdaa20d3..8dbd97fd7f18 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -34,6 +34,7 @@ void __init xen_build_dynamic_phys_to_machine(void);
 
 void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
+void xen_teardown_timer(int cpu);
 cycle_t xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 unsigned long xen_tsc_khz(void);
@@ -50,11 +51,16 @@ void xen_mark_init_mm_pinned(void);
 
 void __init xen_setup_vcpu_info_placement(void);
 
+void xen_play_dead(void);
+void xen_cpu_die(unsigned int cpu);
+int xen_cpu_disable(void);
+
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
 void __init xen_init_spinlocks(void);
 __cpuinit void xen_init_lock_cpu(int cpu);
+void xen_uninit_lock_cpu(int cpu);
 
 extern cpumask_t xen_cpu_initialized_map;
 #else
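With these declarations, every per-CPU bring-up helper now has a matching teardown (xen_setup_timer()/xen_teardown_timer(), xen_init_lock_cpu()/xen_uninit_lock_cpu()), which is what lets xen_cpu_die() fully unwind a vCPU's per-CPU IRQ bindings before the CPU can be plugged back in. The effect of a hotplug cycle is easiest to observe through the standard sysfs CPU masks; a small sketch follows, where the file names are generic sysfs attributes rather than anything this patch adds:

/* show_cpus.c - print which CPUs the guest considers present vs. online.
 * Purely illustrative; reads the standard sysfs cpumask files.
 */
#include <stdio.h>

static void show(const char *name, const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		printf("%-8s <unavailable>\n", name);
		if (f)
			fclose(f);
		return;
	}
	printf("%-8s %s", name, buf);	/* buf keeps its trailing newline */
	fclose(f);
}

int main(void)
{
	show("present:", "/sys/devices/system/cpu/present");
	show("online:", "/sys/devices/system/cpu/online");
	return 0;
}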