author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 16:36:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 16:36:04 -0400
commit		078838d56574694d0a4815d9c1b7f28e8844638b
tree		0d8c72d9dc46b0d2e72073fb928ec08e14b02b76 /arch/x86/xen
parent		eeee78cf77df0450ca285a7cd6d73842181e825c
parent		590ee7dbd569a012df705a5204fc5f1066f52b8c
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU changes from Ingo Molnar:
"The main changes in this cycle were:
- changes permitting use of call_rcu() and friends very early in
boot, for example, before rcu_init() is invoked.
- add in-kernel API to enable and disable expediting of normal RCU
grace periods.
- improve RCU's handling of (hotplug-) outgoing CPUs.
- NO_HZ_FULL_SYSIDLE fixes.
- tiny-RCU updates to make it more tiny.
- documentation updates.
- miscellaneous fixes"
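Of the items above, the in-kernel API for expediting normal grace periods is small enough to sketch. A minimal illustration, assuming the rcu_expedite_gp()/rcu_unexpedite_gp() interfaces this series introduces; the surrounding function and its name are hypothetical:

	#include <linux/rcupdate.h>

	/* Hypothetical caller showing the paired expedite/unexpedite calls. */
	static void example_latency_critical_init(void)
	{
		rcu_expedite_gp();	/* Normal grace periods now run expedited. */
		synchronize_rcu();	/* Completes quickly, at extra CPU cost. */
		rcu_unexpedite_gp();	/* Restore normal, batched grace periods. */
	}

While the expedite count is nonzero, waits for normal grace periods take the expedited path, trading CPU overhead for latency; the calls nest, so each rcu_expedite_gp() needs a matching rcu_unexpedite_gp().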
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (58 commits)
cpu: Provide smpboot_thread_init() on !CONFIG_SMP kernels as well
cpu: Defer smpboot kthread unparking until CPU known to scheduler
rcu: Associate quiescent-state reports with grace period
rcu: Yet another fix for preemption and CPU hotplug
rcu: Add diagnostics to grace-period cleanup
rcutorture: Default to grace-period-initialization delays
rcu: Handle outgoing CPUs on exit from idle loop
cpu: Make CPU-offline idle-loop transition point more precise
rcu: Eliminate ->onoff_mutex from rcu_node structure
rcu: Process offlining and onlining only at grace-period start
rcu: Move rcu_report_unblock_qs_rnp() to common code
rcu: Rework preemptible expedited bitmask handling
rcu: Remove event tracing from rcu_cpu_notify(), used by offline CPUs
rcutorture: Enable slow grace-period initializations
rcu: Provide diagnostic option to slow down grace-period initialization
rcu: Detect stalls caused by failure to propagate up rcu_node tree
rcu: Eliminate empty HOTPLUG_CPU ifdef
rcu: Simplify sync_rcu_preempt_exp_init()
rcu: Put all orphan-callback-related code under same comment
rcu: Consolidate offline-CPU callback initialization
...
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/smp.c	46	+++++++++++++++++++++++++---------------------
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 7413ee3706d0..86484384492e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -90,14 +90,10 @@ static void cpu_bringup(void)
 
 	set_cpu_online(cpu, true);
 
-	this_cpu_write(cpu_state, CPU_ONLINE);
-
-	wmb();
+	cpu_set_state_online(cpu);  /* Implies full memory barrier. */
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
-
-	wmb();			/* make sure everything is out */
 }
 
 /*
@@ -451,7 +447,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
 
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	/*
+	 * PV VCPUs are always successfully taken down (see 'while' loop
+	 * in xen_cpu_die()), so -EBUSY is an error.
+	 */
+	rc = cpu_check_up_prepare(cpu);
+	if (rc)
+		return rc;
 
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -467,10 +469,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
-	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+	while (cpu_report_state(cpu) != CPU_ONLINE)
 		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-		barrier();
-	}
 
 	return 0;
 }
@@ -499,11 +499,11 @@ static void xen_cpu_die(unsigned int cpu)
 		schedule_timeout(HZ/10);
 	}
 
-	cpu_die_common(cpu);
-
-	xen_smp_intr_free(cpu);
-	xen_uninit_lock_cpu(cpu);
-	xen_teardown_timer(cpu);
+	if (common_cpu_die(cpu) == 0) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+		xen_teardown_timer(cpu);
+	}
 }
 
 static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -735,6 +735,16 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
+
+	/*
+	 * This can happen if CPU was offlined earlier and
+	 * offlining timed out in common_cpu_die().
+	 */
+	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+	}
+
 	/*
 	 * xen_smp_intr_init() needs to run before native_cpu_up()
 	 * so that IPI vectors are set up on the booting CPU before
@@ -756,12 +766,6 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	return rc;
 }
 
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
-	xen_cpu_die(cpu);
-	native_cpu_die(cpu);
-}
-
 void __init xen_hvm_smp_init(void)
 {
 	if (!xen_have_vector_callback)
@@ -769,7 +773,7 @@ void __init xen_hvm_smp_init(void)
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
 	smp_ops.cpu_up = xen_hvm_cpu_up;
-	smp_ops.cpu_die = xen_hvm_cpu_die;
+	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
 	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
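The change above replaces Xen's private cpu_state bookkeeping with the arch-common outgoing-CPU state machine from kernel/smpboot.c that this series introduces. A condensed sketch of the intended call pattern, assuming the signatures visible in the diff; the example_ function is hypothetical and elides all arch-specific bring-up:

	#include <linux/cpu.h>

	/* Hypothetical arch cpu_up path using the common hotplug state machine. */
	static int example_cpu_up(unsigned int cpu)
	{
		int rc;

		/* Fails (e.g. -EBUSY) if an earlier offline never completed. */
		rc = cpu_check_up_prepare(cpu);
		if (rc)
			return rc;

		/* ... arch-specific boot; the incoming CPU eventually calls
		 * cpu_set_state_online(cpu), which implies a full memory barrier. */

		/* The boot CPU then polls for the state to flip. */
		while (cpu_report_state(cpu) != CPU_ONLINE)
			cpu_relax();
		return 0;
	}

On the teardown side, x86's common_cpu_die() returns nonzero when the dying CPU times out, leaving it in CPU_DEAD_FROZEN; that is why xen_hvm_cpu_up() above must free the CPU's interrupts and lock state before bringing it back up.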