diff options
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_builtin.c | 3 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rmhandlers.S | 54 | ||||
-rw-r--r-- | arch/powerpc/kvm/e500.c | 14 | ||||
-rw-r--r-- | arch/powerpc/kvm/e500_mmu_host.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kvm/e500mc.c | 4 |
5 files changed, 50 insertions, 31 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 4fdc27c80f4c..3f1bb5a36c27 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/export.h> | 12 | #include <linux/export.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/bootmem.h> | ||
16 | #include <linux/init.h> | 15 | #include <linux/init.h> |
17 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
18 | #include <linux/sizes.h> | 17 | #include <linux/sizes.h> |
@@ -154,7 +153,7 @@ EXPORT_SYMBOL_GPL(kvm_release_hpt); | |||
154 | * kvm_cma_reserve() - reserve area for kvm hash pagetable | 153 | * kvm_cma_reserve() - reserve area for kvm hash pagetable |
155 | * | 154 | * |
156 | * This function reserves memory from early allocator. It should be | 155 | * This function reserves memory from early allocator. It should be |
157 | * called by arch specific code once the early allocator (memblock or bootmem) | 156 | * called by arch specific code once the memblock allocator |
158 | * has been activated and all other subsystems have already allocated/reserved | 157 | * has been activated and all other subsystems have already allocated/reserved |
159 | * memory. | 158 | * memory. |
160 | */ | 159 | */ |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index edb2ccdbb2ba..65c105b17a25 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -201,8 +201,6 @@ kvmppc_primary_no_guest: | |||
201 | bge kvm_novcpu_exit /* another thread already exiting */ | 201 | bge kvm_novcpu_exit /* another thread already exiting */ |
202 | li r3, NAPPING_NOVCPU | 202 | li r3, NAPPING_NOVCPU |
203 | stb r3, HSTATE_NAPPING(r13) | 203 | stb r3, HSTATE_NAPPING(r13) |
204 | li r3, 1 | ||
205 | stb r3, HSTATE_HWTHREAD_REQ(r13) | ||
206 | 204 | ||
207 | b kvm_do_nap | 205 | b kvm_do_nap |
208 | 206 | ||
@@ -293,6 +291,8 @@ kvm_start_guest: | |||
293 | /* if we have no vcpu to run, go back to sleep */ | 291 | /* if we have no vcpu to run, go back to sleep */ |
294 | beq kvm_no_guest | 292 | beq kvm_no_guest |
295 | 293 | ||
294 | kvm_secondary_got_guest: | ||
295 | |||
296 | /* Set HSTATE_DSCR(r13) to something sensible */ | 296 | /* Set HSTATE_DSCR(r13) to something sensible */ |
297 | ld r6, PACA_DSCR(r13) | 297 | ld r6, PACA_DSCR(r13) |
298 | std r6, HSTATE_DSCR(r13) | 298 | std r6, HSTATE_DSCR(r13) |
@@ -318,27 +318,46 @@ kvm_start_guest: | |||
318 | stwcx. r3, 0, r4 | 318 | stwcx. r3, 0, r4 |
319 | bne 51b | 319 | bne 51b |
320 | 320 | ||
321 | /* | ||
322 | * At this point we have finished executing in the guest. | ||
323 | * We need to wait for hwthread_req to become zero, since | ||
324 | * we may not turn on the MMU while hwthread_req is non-zero. | ||
325 | * While waiting we also need to check if we get given a vcpu to run. | ||
326 | */ | ||
321 | kvm_no_guest: | 327 | kvm_no_guest: |
322 | li r0, KVM_HWTHREAD_IN_NAP | 328 | lbz r3, HSTATE_HWTHREAD_REQ(r13) |
329 | cmpwi r3, 0 | ||
330 | bne 53f | ||
331 | HMT_MEDIUM | ||
332 | li r0, KVM_HWTHREAD_IN_KERNEL | ||
323 | stb r0, HSTATE_HWTHREAD_STATE(r13) | 333 | stb r0, HSTATE_HWTHREAD_STATE(r13) |
324 | kvm_do_nap: | 334 | /* need to recheck hwthread_req after a barrier, to avoid race */ |
325 | /* Clear the runlatch bit before napping */ | 335 | sync |
326 | mfspr r2, SPRN_CTRLF | 336 | lbz r3, HSTATE_HWTHREAD_REQ(r13) |
327 | clrrdi r2, r2, 1 | 337 | cmpwi r3, 0 |
328 | mtspr SPRN_CTRLT, r2 | 338 | bne 54f |
329 | 339 | /* | |
340 | * We jump to power7_wakeup_loss, which will return to the caller | ||
341 | * of power7_nap in the powernv cpu offline loop. The value we | ||
342 | * put in r3 becomes the return value for power7_nap. | ||
343 | */ | ||
330 | li r3, LPCR_PECE0 | 344 | li r3, LPCR_PECE0 |
331 | mfspr r4, SPRN_LPCR | 345 | mfspr r4, SPRN_LPCR |
332 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 | 346 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 |
333 | mtspr SPRN_LPCR, r4 | 347 | mtspr SPRN_LPCR, r4 |
334 | isync | 348 | li r3, 0 |
335 | std r0, HSTATE_SCRATCH0(r13) | 349 | b power7_wakeup_loss |
336 | ptesync | 350 | |
337 | ld r0, HSTATE_SCRATCH0(r13) | 351 | 53: HMT_LOW |
338 | 1: cmpd r0, r0 | 352 | ld r4, HSTATE_KVM_VCPU(r13) |
339 | bne 1b | 353 | cmpdi r4, 0 |
340 | nap | 354 | beq kvm_no_guest |
341 | b . | 355 | HMT_MEDIUM |
356 | b kvm_secondary_got_guest | ||
357 | |||
358 | 54: li r0, KVM_HWTHREAD_IN_KVM | ||
359 | stb r0, HSTATE_HWTHREAD_STATE(r13) | ||
360 | b kvm_no_guest | ||
342 | 361 | ||
343 | /****************************************************************************** | 362 | /****************************************************************************** |
344 | * * | 363 | * * |
@@ -2172,6 +2191,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
2172 | * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the | 2191 | * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the |
2173 | * runlatch bit before napping. | 2192 | * runlatch bit before napping. |
2174 | */ | 2193 | */ |
2194 | kvm_do_nap: | ||
2175 | mfspr r2, SPRN_CTRLF | 2195 | mfspr r2, SPRN_CTRLF |
2176 | clrrdi r2, r2, 1 | 2196 | clrrdi r2, r2, 1 |
2177 | mtspr SPRN_CTRLT, r2 | 2197 | mtspr SPRN_CTRLT, r2 |
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 2e02ed849f36..16095841afe1 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry) | |||
76 | unsigned long sid; | 76 | unsigned long sid; |
77 | int ret = -1; | 77 | int ret = -1; |
78 | 78 | ||
79 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); | 79 | sid = __this_cpu_inc_return(pcpu_last_used_sid); |
80 | if (sid < NUM_TIDS) { | 80 | if (sid < NUM_TIDS) { |
81 | __get_cpu_var(pcpu_sids).entry[sid] = entry; | 81 | __this_cpu_write(pcpu_sids.entry[sid], entry); |
82 | entry->val = sid; | 82 | entry->val = sid; |
83 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; | 83 | entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]); |
84 | ret = sid; | 84 | ret = sid; |
85 | } | 85 | } |
86 | 86 | ||
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry) | |||
108 | static inline int local_sid_lookup(struct id *entry) | 108 | static inline int local_sid_lookup(struct id *entry) |
109 | { | 109 | { |
110 | if (entry && entry->val != 0 && | 110 | if (entry && entry->val != 0 && |
111 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && | 111 | __this_cpu_read(pcpu_sids.entry[entry->val]) == entry && |
112 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) | 112 | entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val])) |
113 | return entry->val; | 113 | return entry->val; |
114 | return -1; | 114 | return -1; |
115 | } | 115 | } |
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry) | |||
117 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | 117 | /* Invalidate all id mappings on local core -- call with preempt disabled */ |
118 | static inline void local_sid_destroy_all(void) | 118 | static inline void local_sid_destroy_all(void) |
119 | { | 119 | { |
120 | __get_cpu_var(pcpu_last_used_sid) = 0; | 120 | __this_cpu_write(pcpu_last_used_sid, 0); |
121 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); | 121 | memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids)); |
122 | } | 122 | } |
123 | 123 | ||
124 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | 124 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) |
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 769778f855b0..cc536d4a75ef 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
@@ -661,7 +661,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | |||
661 | if (unlikely((pr && !(mas3 & MAS3_UX)) || | 661 | if (unlikely((pr && !(mas3 & MAS3_UX)) || |
662 | (!pr && !(mas3 & MAS3_SX)))) { | 662 | (!pr && !(mas3 & MAS3_SX)))) { |
663 | pr_err_ratelimited( | 663 | pr_err_ratelimited( |
664 | "%s: Instuction emulation from guest addres %08lx without execute permission\n", | 664 | "%s: Instruction emulation from guest address %08lx without execute permission\n", |
665 | __func__, geaddr); | 665 | __func__, geaddr); |
666 | return EMULATE_AGAIN; | 666 | return EMULATE_AGAIN; |
667 | } | 667 | } |
@@ -673,7 +673,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | |||
673 | if (has_feature(vcpu, VCPU_FTR_MMU_V2) && | 673 | if (has_feature(vcpu, VCPU_FTR_MMU_V2) && |
674 | unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { | 674 | unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { |
675 | pr_err_ratelimited( | 675 | pr_err_ratelimited( |
676 | "%s: Instuction emulation from guest addres %08lx mismatches storage attributes\n", | 676 | "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n", |
677 | __func__, geaddr); | 677 | __func__, geaddr); |
678 | return EMULATE_AGAIN; | 678 | return EMULATE_AGAIN; |
679 | } | 679 | } |
@@ -686,7 +686,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | |||
686 | 686 | ||
687 | /* Guard against emulation from devices area */ | 687 | /* Guard against emulation from devices area */ |
688 | if (unlikely(!page_is_ram(pfn))) { | 688 | if (unlikely(!page_is_ram(pfn))) { |
689 | pr_err_ratelimited("%s: Instruction emulation from non-RAM host addres %08llx is not supported\n", | 689 | pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", |
690 | __func__, addr); | 690 | __func__, addr); |
691 | return EMULATE_AGAIN; | 691 | return EMULATE_AGAIN; |
692 | } | 692 | } |
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 2fdc8722e324..cda695de8aa7 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c | |||
@@ -144,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) | |||
144 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); | 144 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); |
145 | 145 | ||
146 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || | 146 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || |
147 | __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) { | 147 | __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) { |
148 | kvmppc_e500_tlbil_all(vcpu_e500); | 148 | kvmppc_e500_tlbil_all(vcpu_e500); |
149 | __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu; | 149 | __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu); |
150 | } | 150 | } |
151 | } | 151 | } |
152 | 152 | ||