Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--   arch/x86/xen/smp.c   245
1 files changed, 66 insertions, 179 deletions
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d8faf79a0a1d..d77da613b1d2 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -11,11 +11,8 @@
  * useful topology information for the kernel to make use of. As a
  * result, all CPUs are treated as if they're single-core and
  * single-threaded.
- *
- * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -36,8 +33,6 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static void __cpuinit xen_init_lock_cpu(int cpu);
-
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -64,11 +59,12 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static __cpuinit void cpu_bringup(void)
 {
 	int cpu = smp_processor_id();
 
 	cpu_init();
+	touch_softlockup_watchdog();
 	preempt_disable();
 
 	xen_enable_sysenter();
@@ -89,6 +85,11 @@ static __cpuinit void cpu_bringup_and_idle(void)
 	local_irq_enable();
 
 	wmb();			/* make sure everything is out */
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+	cpu_bringup();
 	cpu_idle();
 }
 
@@ -212,8 +213,6 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
 		cpu_set(cpu, cpu_present_map);
 	}
-
-	//init_xenbus_allowed_cpumask();
 }
 
 static __cpuinit int
@@ -281,12 +280,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#if 0
-	rc = cpu_up_check(cpu);
-	if (rc)
-		return rc;
-#endif
-
 #ifdef CONFIG_X86_64
 	/* Allocate node local memory for AP pdas */
 	WARN_ON(cpu == 0);
@@ -339,6 +332,60 @@ static void xen_smp_cpus_done(unsigned int max_cpus)
 {
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int xen_cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	if (cpu == 0)
+		return -EBUSY;
+
+	cpu_disable_common();
+
+	load_cr3(swapper_pg_dir);
+	return 0;
+}
+
+static void xen_cpu_die(unsigned int cpu)
+{
+	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+		current->state = TASK_UNINTERRUPTIBLE;
+		schedule_timeout(HZ/10);
+	}
+	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	xen_uninit_lock_cpu(cpu);
+	xen_teardown_timer(cpu);
+
+	if (num_online_cpus() == 1)
+		alternatives_smp_switch(0);
+}
+
+static void xen_play_dead(void)
+{
+	play_dead_common();
+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+	cpu_bringup();
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static int xen_cpu_disable(void)
+{
+	return -ENOSYS;
+}
+
+static void xen_cpu_die(unsigned int cpu)
+{
+	BUG();
+}
+
+static void xen_play_dead(void)
+{
+	BUG();
+}
+
+#endif
 static void stop_self(void *v)
 {
 	int cpu = smp_processor_id();
@@ -419,176 +466,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-struct xen_spinlock {
-	unsigned char lock;		/* 0 -> free; 1 -> locked */
-	unsigned short spinners;	/* count of waiting cpus */
-};
-
-static int xen_spin_is_locked(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	/* Not strictly true; this is only the count of contended
-	   lock-takers entering the slow path. */
-	return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %b0,%1"
-	    : "+q" (old), "+m" (xl->lock) : : "memory");
-
-	return old == 0;
-}
-
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-static inline void spinning_lock(struct xen_spinlock *xl)
-{
-	__get_cpu_var(lock_spinners) = xl;
-	wmb();			/* set lock of interest before count */
-	asm(LOCK_PREFIX " incw %0"
-	    : "+m" (xl->spinners) : : "memory");
-}
-
-static inline void unspinning_lock(struct xen_spinlock *xl)
-{
-	asm(LOCK_PREFIX " decw %0"
-	    : "+m" (xl->spinners) : : "memory");
-	wmb();			/* decrement count before clearing lock */
-	__get_cpu_var(lock_spinners) = NULL;
-}
-
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int irq = __get_cpu_var(lock_kicker_irq);
-	int ret;
-
-	/* If kicker interrupts not initialized yet, just spin */
-	if (irq == -1)
-		return 0;
-
-	/* announce we're spinning */
-	spinning_lock(xl);
-
-	/* clear pending */
-	xen_clear_irq_pending(irq);
-
-	/* check again make sure it didn't become free while
-	   we weren't looking */
-	ret = xen_spin_trylock(lock);
-	if (ret)
-		goto out;
-
-	/* block until irq becomes pending */
-	xen_poll_irq(irq);
-	kstat_this_cpu.irqs[irq]++;
-
-out:
-	unspinning_lock(xl);
-	return ret;
-}
-
-static void xen_spin_lock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int timeout;
-	u8 oldval;
-
-	do {
-		timeout = 1 << 10;
-
-		asm("1: xchgb %1,%0\n"
-		    "   testb %1,%1\n"
-		    "   jz 3f\n"
-		    "2: rep;nop\n"
-		    "   cmpb $0,%0\n"
-		    "   je 1b\n"
-		    "   dec %2\n"
-		    "   jnz 2b\n"
-		    "3:\n"
-		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
-		    : "1" (1)
-		    : "memory");
-
-	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
-}
-
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		/* XXX should mix up next cpu selection */
-		if (per_cpu(lock_spinners, cpu) == xl) {
-			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-			break;
-		}
-	}
-}
-
-static void xen_spin_unlock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	smp_wmb();		/* make sure no writes get moved after unlock */
-	xl->lock = 0;		/* release lock */
-
-	/* make sure unlock happens before kick */
-	barrier();
-
-	if (unlikely(xl->spinners))
-		xen_spin_unlock_slow(xl);
-}
-
-static __cpuinit void xen_init_lock_cpu(int cpu)
-{
-	int irq;
-	const char *name;
-
-	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
-	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
-				     cpu,
-				     xen_reschedule_interrupt,
-				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-				     name,
-				     NULL);
-
-	if (irq >= 0) {
-		disable_irq(irq); /* make sure it's never delivered */
-		per_cpu(lock_kicker_irq, cpu) = irq;
-	}
-
-	printk("cpu %d spinlock event irq %d\n", cpu, irq);
-}
-
-static void __init xen_init_spinlocks(void)
-{
-	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
-	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
-	pv_lock_ops.spin_lock = xen_spin_lock;
-	pv_lock_ops.spin_trylock = xen_spin_trylock;
-	pv_lock_ops.spin_unlock = xen_spin_unlock;
-}
-
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
-	.cpu_up = xen_cpu_up,
 	.smp_cpus_done = xen_smp_cpus_done,
 
+	.cpu_up = xen_cpu_up,
+	.cpu_die = xen_cpu_die,
+	.cpu_disable = xen_cpu_disable,
+	.play_dead = xen_play_dead,
+
 	.smp_send_stop = xen_smp_send_stop,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
