author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-13 18:31:08 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-13 18:31:08 -0500
commit    66cdd0ceaf65a18996f561b770eedde1d123b019 (patch)
tree      4892eaa422d366fce5d1e866ff1fe0988af95569 /arch/powerpc/kernel
parent    896ea17d3da5f44b2625c9cda9874d7dfe447393 (diff)
parent    58b7825bc324da55415034a9f6ca5d716b8fd898 (diff)
Merge tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Marcelo Tosatti:
 "Considerable KVM/PPC work, x86 kvmclock vsyscall support,
  IA32_TSC_ADJUST MSR emulation, amongst others."

Fix up trivial conflict in kernel/sched/core.c due to cross-cpu
migration notifier added next to rq migration call-back.

* tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (156 commits)
  KVM: emulator: fix real mode segment checks in address linearization
  VMX: remove unneeded enable_unrestricted_guest check
  KVM: VMX: fix DPL during entry to protected mode
  x86/kexec: crash_vmclear_local_vmcss needs __rcu
  kvm: Fix irqfd resampler list walk
  KVM: VMX: provide the vmclear function and a bitmap to support VMCLEAR in kdump
  x86/kexec: VMCLEAR VMCSs loaded on all cpus if necessary
  KVM: MMU: optimize for set_spte
  KVM: PPC: booke: Get/set guest EPCR register using ONE_REG interface
  KVM: PPC: bookehv: Add EPCR support in mtspr/mfspr emulation
  KVM: PPC: bookehv: Add guest computation mode for irq delivery
  KVM: PPC: Make EPCR a valid field for booke64 and bookehv
  KVM: PPC: booke: Extend MAS2 EPN mask for 64-bit
  KVM: PPC: e500: Mask MAS2 EPN high 32-bits in 32/64 tlbwe emulation
  KVM: PPC: Mask ea's high 32-bits in 32/64 instr emulation
  KVM: PPC: e500: Add emulation helper for getting instruction ea
  KVM: PPC: bookehv64: Add support for interrupt handling
  KVM: PPC: bookehv: Remove GET_VCPU macro from exception handler
  KVM: PPC: booke: Fix get_tb() compile error on 64-bit
  KVM: PPC: e500: Silence bogus GCC warning in tlb code
  ...
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     |  4
-rw-r--r--  arch/powerpc/kernel/epapr_hcalls.S    | 28
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c  | 11
-rw-r--r--  arch/powerpc/kernel/kvm.c             |  2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c       |  5
-rw-r--r--  arch/powerpc/kernel/smp.c             | 46
6 files changed, 91 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7523539cfe9f..4e23ba2f3ca7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -441,8 +441,7 @@ int main(void)
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
 	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
 	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
-	DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
-	DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
 	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
 	DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
 	DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
@@ -470,7 +469,6 @@ int main(void)
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
-	DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
 	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
 	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 697b390ebfd8..62c0dc237826 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -8,13 +8,41 @@
  */
 
 #include <linux/threads.h>
+#include <asm/epapr_hcalls.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
 
+/* epapr_ev_idle() was derived from e500_idle() */
+_GLOBAL(epapr_ev_idle)
+	CURRENT_THREAD_INFO(r3, r1)
+	PPC_LL	r4, TI_LOCAL_FLAGS(r3)	/* set napping bit */
+	ori	r4, r4,_TLF_NAPPING	/* so when we take an exception */
+	PPC_STL	r4, TI_LOCAL_FLAGS(r3)	/* it will return to our caller */
+
+	wrteei	1
+
+idle_loop:
+	LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
+
+.global epapr_ev_idle_start
+epapr_ev_idle_start:
+	li	r3, -1
+	nop
+	nop
+	nop
+
+	/*
+	 * Guard against spurious wakeups from a hypervisor --
+	 * only interrupt will cause us to return to LR due to
+	 * _TLF_NAPPING.
+	 */
+	b	idle_loop
+
 /* Hypercall entry point. Will be patched with device tree instructions. */
 .global epapr_hypercall_start
 epapr_hypercall_start:
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 028aeae370b6..f3eab8594d9f 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -21,6 +21,10 @@
 #include <asm/epapr_hcalls.h>
 #include <asm/cacheflush.h>
 #include <asm/code-patching.h>
+#include <asm/machdep.h>
+
+extern void epapr_ev_idle(void);
+extern u32 epapr_ev_idle_start[];
 
 bool epapr_paravirt_enabled;
 
@@ -41,8 +45,13 @@ static int __init epapr_paravirt_init(void)
 	if (len % 4 || len > (4 * 4))
 		return -ENODEV;
 
-	for (i = 0; i < (len / 4); i++)
+	for (i = 0; i < (len / 4); i++) {
 		patch_instruction(epapr_hypercall_start + i, insts[i]);
+		patch_instruction(epapr_ev_idle_start + i, insts[i]);
+	}
+
+	if (of_get_property(hyper_node, "has-idle", NULL))
+		ppc_md.power_save = epapr_ev_idle;
 
 	epapr_paravirt_enabled = true;
 
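For context on what the new "has-idle" hook buys: once ppc_md.power_save points at epapr_ev_idle, the generic powerpc idle path calls it instead of busy-waiting. A minimal sketch of that dispatch, assuming the era's arch/powerpc/kernel/idle.c structure (simplified, not code from this merge):

/*
 * Sketch of the idle-loop dispatch that makes ppc_md.power_save useful.
 * Hypothetical and simplified -- the real loop also deals with NO_HZ,
 * tracing and CPU hotplug.
 */
static void arch_idle_sketch(void)
{
	if (ppc_md.power_save)
		ppc_md.power_save();	/* e.g. epapr_ev_idle after this patch */
	else
		HMT_low();		/* otherwise just lower thread priority */
}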
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 867db1de8949..a61b133c4f99 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -419,7 +419,7 @@ static void kvm_map_magic_page(void *data)
 	in[0] = KVM_MAGIC_PAGE;
 	in[1] = KVM_MAGIC_PAGE;
 
-	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
+	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
 
 	*features = out[0];
 }
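The one-line kvm.c change moves the magic-page hypercall over to the ePAPR-style token encoding from asm/epapr_hcalls.h. Roughly, such a token packs a vendor ID into the upper 16 bits and the hypercall number into the lower 16 bits; a sketch of the idea (the vendor value below is an assumption, the authoritative definitions live in asm/epapr_hcalls.h and asm/kvm_para.h):

/* Illustrative sketch of ePAPR-style hypercall token composition.
 * Treat the vendor value as a placeholder, not the real constant.
 */
#define _EV_HCALL_TOKEN(id, num)	(((id) << 16) | (num))
#define EV_KVM_VENDOR_ID		42	/* assumed KVM vendor ID */
#define KVM_HCALL_TOKEN(num)		_EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, (num))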
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 19e4288d8486..78b8766fd79e 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -43,6 +43,7 @@
 #include <asm/dcr.h>
 #include <asm/ftrace.h>
 #include <asm/switch_to.h>
+#include <asm/epapr_hcalls.h>
 
 #ifdef CONFIG_PPC32
 extern void transfer_to_handler(void);
@@ -191,3 +192,7 @@ EXPORT_SYMBOL(__arch_hweight64);
 #ifdef CONFIG_PPC_BOOK3S_64
 EXPORT_SYMBOL_GPL(mmu_psize_defs);
 #endif
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+EXPORT_SYMBOL(epapr_hypercall_start);
+#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 2b952b5386fd..e5b133ebd8a5 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,6 +427,45 @@ int generic_check_cpu_restart(unsigned int cpu)
 {
 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 }
+
+static atomic_t secondary_inhibit_count;
+
+/*
+ * Don't allow secondary CPU threads to come online
+ */
+void inhibit_secondary_onlining(void)
+{
+	/*
+	 * This makes secondary_inhibit_count stable during cpu
+	 * online/offline operations.
+	 */
+	get_online_cpus();
+
+	atomic_inc(&secondary_inhibit_count);
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);
+
+/*
+ * Allow secondary CPU threads to come online again
+ */
+void uninhibit_secondary_onlining(void)
+{
+	get_online_cpus();
+	atomic_dec(&secondary_inhibit_count);
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);
+
+static int secondaries_inhibited(void)
+{
+	return atomic_read(&secondary_inhibit_count);
+}
+
+#else /* HOTPLUG_CPU */
+
+#define secondaries_inhibited()	0
+
 #endif
 
 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
@@ -445,6 +484,13 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc, c;
 
+	/*
+	 * Don't allow secondary threads to come online if inhibited
+	 */
+	if (threads_per_core > 1 && secondaries_inhibited() &&
+	    cpu % threads_per_core != 0)
+		return -EBUSY;
+
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
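The smp.c additions give a subsystem that needs whole cores to itself (in this merge, the Book3S HV KVM side is the expected caller) a way to keep sibling threads offline for a while. A hypothetical usage sketch, not code from this merge:

/* Hypothetical bracket around a feature's lifetime. While inhibited,
 * __cpu_up() refuses to online any CPU that is not the first thread
 * of its core and returns -EBUSY instead.
 */
static int my_feature_start(void)
{
	inhibit_secondary_onlining();	/* sibling threads stay offline */
	/* ... bring the feature up ... */
	return 0;
}

static void my_feature_stop(void)
{
	/* ... tear the feature down ... */
	uninhibit_secondary_onlining();	/* siblings may be onlined again */
}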