path: root/arch/powerpc/kvm
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-09-05 10:22:45 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-09-05 10:22:45 -0400
commit	593d0a3e9f813db910dc50574532914db21d09ff (patch)
tree	12d8413ee57b4383ca8c906996ffe02be6d377a5 /arch/powerpc/kvm
parent	50e900417b8096939d12a46848f965e27a905e36 (diff)
parent	4cb38750d49010ae72e718d46605ac9ba5a851b4 (diff)
Merge commit '4cb38750d49010ae72e718d46605ac9ba5a851b4' into stable/for-linus-3.6
* commit '4cb38750d49010ae72e718d46605ac9ba5a851b4': (6849 commits)
  bcma: fix invalid PMU chip control masks
  [libata] pata_cmd64x: whitespace cleanup
  libata-acpi: fix up for acpi_pm_device_sleep_state API
  sata_dwc_460ex: device tree may specify dma_channel
  ahci, trivial: fixed coding style issues related to braces
  ahci_platform: add hibernation callbacks
  libata-eh.c: local functions should not be exposed globally
  libata-transport.c: local functions should not be exposed globally
  sata_dwc_460ex: support hardreset
  ata: use module_pci_driver
  drivers/ata/pata_pcmcia.c: adjust suspicious bit operation
  pata_imx: Convert to clk_prepare_enable/clk_disable_unprepare
  ahci: Enable SB600 64bit DMA on MSI K9AGM2 (MS-7327) v2
  [libata] Prevent interface errors with Seagate FreeAgent GoFlex
  drivers/acpi/glue: revert accidental license-related 6b66d95895c bits
  libata-acpi: add missing inlines in libata.h
  i2c-omap: Add support for I2C_M_STOP message flag
  i2c: Fall back to emulated SMBus if the operation isn't supported natively
  i2c: Add SCCB support
  i2c-tiny-usb: Add support for the Robofuzz OSIF USB/I2C converter
  ...
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c     | 123
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c            | 136
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c    |   5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c     |  15
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 229
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S    |  80
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c       |   1
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S    |   1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S       |   2
-rw-r--r--  arch/powerpc/kvm/booke.c                |  26
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c        |  28
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S     | 328
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S   | 231
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c         |   3
-rw-r--r--  arch/powerpc/kvm/e500mc.c               |   8
-rw-r--r--  arch/powerpc/kvm/emulate.c              |  16
-rw-r--r--  arch/powerpc/kvm/powerpc.c              |  18
17 files changed, 726 insertions(+), 524 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80a577517584..d03eb6f7b058 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,56 +37,121 @@
 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
 #define MAX_LPID_970	63
 
-long kvmppc_alloc_hpt(struct kvm *kvm)
+/* Power architecture requires HPT is at least 256kB */
+#define PPC_MIN_HPT_ORDER	18
+
+long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
 	unsigned long hpt;
-	long lpid;
 	struct revmap_entry *rev;
 	struct kvmppc_linear_info *li;
+	long order = kvm_hpt_order;
 
-	/* Allocate guest's hashed page table */
-	li = kvm_alloc_hpt();
-	if (li) {
-		/* using preallocated memory */
-		hpt = (ulong)li->base_virt;
-		kvm->arch.hpt_li = li;
-	} else {
-		/* using dynamic memory */
+	if (htab_orderp) {
+		order = *htab_orderp;
+		if (order < PPC_MIN_HPT_ORDER)
+			order = PPC_MIN_HPT_ORDER;
+	}
+
+	/*
+	 * If the user wants a different size from default,
+	 * try first to allocate it from the kernel page allocator.
+	 */
+	hpt = 0;
+	if (order != kvm_hpt_order) {
 		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
-				       __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
+				       __GFP_NOWARN, order - PAGE_SHIFT);
+		if (!hpt)
+			--order;
 	}
 
+	/* Next try to allocate from the preallocated pool */
 	if (!hpt) {
-		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
-		return -ENOMEM;
+		li = kvm_alloc_hpt();
+		if (li) {
+			hpt = (ulong)li->base_virt;
+			kvm->arch.hpt_li = li;
+			order = kvm_hpt_order;
+		}
 	}
+
+	/* Lastly try successively smaller sizes from the page allocator */
+	while (!hpt && order > PPC_MIN_HPT_ORDER) {
+		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
+				       __GFP_NOWARN, order - PAGE_SHIFT);
+		if (!hpt)
+			--order;
+	}
+
+	if (!hpt)
+		return -ENOMEM;
+
 	kvm->arch.hpt_virt = hpt;
+	kvm->arch.hpt_order = order;
+	/* HPTEs are 2**4 bytes long */
+	kvm->arch.hpt_npte = 1ul << (order - 4);
+	/* 128 (2**7) bytes in each HPTEG */
+	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
 
 	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
+	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
 	if (!rev) {
 		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
 		goto out_freehpt;
 	}
 	kvm->arch.revmap = rev;
+	kvm->arch.sdr1 = __pa(hpt) | (order - 18);
 
-	lpid = kvmppc_alloc_lpid();
-	if (lpid < 0)
-		goto out_freeboth;
+	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
+		hpt, order, kvm->arch.lpid);
 
-	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
-	kvm->arch.lpid = lpid;
-
-	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
+	if (htab_orderp)
+		*htab_orderp = order;
 	return 0;
 
- out_freeboth:
-	vfree(rev);
  out_freehpt:
-	free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+	if (kvm->arch.hpt_li)
+		kvm_release_hpt(kvm->arch.hpt_li);
+	else
+		free_pages(hpt, order - PAGE_SHIFT);
 	return -ENOMEM;
 }
 
+long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
+{
+	long err = -EBUSY;
+	long order;
+
+	mutex_lock(&kvm->lock);
+	if (kvm->arch.rma_setup_done) {
+		kvm->arch.rma_setup_done = 0;
+		/* order rma_setup_done vs. vcpus_running */
+		smp_mb();
+		if (atomic_read(&kvm->arch.vcpus_running)) {
+			kvm->arch.rma_setup_done = 1;
+			goto out;
+		}
+	}
+	if (kvm->arch.hpt_virt) {
+		order = kvm->arch.hpt_order;
+		/* Set the entire HPT to 0, i.e. invalid HPTEs */
+		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+		/*
+		 * Set the whole last_vcpu array to an invalid vcpu number.
+		 * This ensures that each vcpu will flush its TLB on next entry.
+		 */
+		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
+		*htab_orderp = order;
+		err = 0;
+	} else {
+		err = kvmppc_alloc_hpt(kvm, htab_orderp);
+		order = *htab_orderp;
+	}
+ out:
+	mutex_unlock(&kvm->lock);
+	return err;
+}
+
 void kvmppc_free_hpt(struct kvm *kvm)
 {
 	kvmppc_free_lpid(kvm->arch.lpid);
@@ -94,7 +159,8 @@ void kvmppc_free_hpt(struct kvm *kvm)
 	if (kvm->arch.hpt_li)
 		kvm_release_hpt(kvm->arch.hpt_li);
 	else
-		free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+		free_pages(kvm->arch.hpt_virt,
+			   kvm->arch.hpt_order - PAGE_SHIFT);
 }
 
 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
@@ -119,6 +185,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	unsigned long psize;
 	unsigned long hp0, hp1;
 	long ret;
+	struct kvm *kvm = vcpu->kvm;
 
 	psize = 1ul << porder;
 	npages = memslot->npages >> (porder - PAGE_SHIFT);
@@ -127,8 +194,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	if (npages > 1ul << (40 - porder))
 		npages = 1ul << (40 - porder);
 	/* Can't use more than 1 HPTE per HPTEG */
-	if (npages > HPT_NPTEG)
-		npages = HPT_NPTEG;
+	if (npages > kvm->arch.hpt_mask + 1)
+		npages = kvm->arch.hpt_mask + 1;
 
 	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
 	      HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -138,7 +205,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	for (i = 0; i < npages; ++i) {
 		addr = i << porder;
 		/* can't use hpt_hash since va > 64 bits */
-		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
+		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
 		/*
 		 * We assume that the hash table is empty and no
 		 * vcpus are using it at this stage.  Since we create
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c6af1d623839..83e929e66f9d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -56,7 +56,7 @@
 /* #define EXIT_DEBUG_INT */
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
-static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
+static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 	return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+	struct kvm *kvm = vcpu->kvm;
 	void *va;
 	unsigned long nb;
+	unsigned long gpa;
 
-	vpap->update_pending = 0;
-	va = NULL;
-	if (vpap->next_gpa) {
-		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-		if (nb < vpap->len) {
-			/*
-			 * If it's now too short, it must be that userspace
-			 * has changed the mappings underlying guest memory,
-			 * so unregister the region.
-			 */
+	/*
+	 * We need to pin the page pointed to by vpap->next_gpa,
+	 * but we can't call kvmppc_pin_guest_page under the lock
+	 * as it does get_user_pages() and down_read().  So we
+	 * have to drop the lock, pin the page, then get the lock
+	 * again and check that a new area didn't get registered
+	 * in the meantime.
+	 */
+	for (;;) {
+		gpa = vpap->next_gpa;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		va = NULL;
+		nb = 0;
+		if (gpa)
+			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		if (gpa == vpap->next_gpa)
+			break;
+		/* sigh... unpin that one and try again */
+		if (va)
 			kvmppc_unpin_guest_page(kvm, va);
-			va = NULL;
-		}
+	}
+
+	vpap->update_pending = 0;
+	if (va && nb < vpap->len) {
+		/*
+		 * If it's now too short, it must be that userspace
+		 * has changed the mappings underlying guest memory,
+		 * so unregister the region.
+		 */
+		kvmppc_unpin_guest_page(kvm, va);
+		va = NULL;
 	}
 	if (vpap->pinned_addr)
 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
 		vcpu->arch.dtl_index = 0;
 	}
 	if (vcpu->arch.slb_shadow.update_pending)
-		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i;
+	int ptid, i, need_vpa_update;
 
 	/* don't start if any threads have a signal pending */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+	need_vpa_update = 0;
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		if (signal_pending(vcpu->arch.run_task))
 			return 0;
+		need_vpa_update |= vcpu->arch.vpa.update_pending |
+			vcpu->arch.slb_shadow.update_pending |
+			vcpu->arch.dtl.update_pending;
+	}
+
+	/*
+	 * Initialize *vc, in particular vc->vcore_state, so we can
+	 * drop the vcore lock if necessary.
+	 */
+	vc->n_woken = 0;
+	vc->nap_count = 0;
+	vc->entry_exit_count = 0;
+	vc->vcore_state = VCORE_RUNNING;
+	vc->in_guest = 0;
+	vc->napping_threads = 0;
+
+	/*
+	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+	 * which can't be called with any spinlocks held.
+	 */
+	if (need_vpa_update) {
+		spin_unlock(&vc->lock);
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+			kvmppc_update_vpas(vcpu);
+		spin_lock(&vc->lock);
+	}
 
 	/*
 	 * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
 
-	vc->n_woken = 0;
-	vc->nap_count = 0;
-	vc->entry_exit_count = 0;
-	vc->vcore_state = VCORE_RUNNING;
 	vc->stolen_tb += mftb() - vc->preempt_tb;
-	vc->in_guest = 0;
 	vc->pcpu = smp_processor_id();
-	vc->napping_threads = 0;
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
-		if (vcpu->arch.vpa.update_pending ||
-		    vcpu->arch.slb_shadow.update_pending ||
-		    vcpu->arch.dtl.update_pending)
-			kvmppc_update_vpas(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 	/* Grab any remaining hw threads so they can't go into the kernel */
@@ -1068,11 +1104,15 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		return -EINTR;
 	}
 
-	/* On the first time here, set up VRMA or RMA */
+	atomic_inc(&vcpu->kvm->arch.vcpus_running);
+	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
+	smp_mb();
+
+	/* On the first time here, set up HTAB and VRMA or RMA */
 	if (!vcpu->kvm->arch.rma_setup_done) {
-		r = kvmppc_hv_setup_rma(vcpu);
+		r = kvmppc_hv_setup_htab_rma(vcpu);
 		if (r)
-			return r;
+			goto out;
 	}
 
 	flush_fp_to_thread(current);
@@ -1090,6 +1130,9 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			kvmppc_core_prepare_to_enter(vcpu);
 		}
 	} while (r == RESUME_GUEST);
+
+ out:
+	atomic_dec(&vcpu->kvm->arch.vcpus_running);
 	return r;
 }
 
@@ -1305,7 +1348,7 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 {
 }
 
-static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
+static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
 	struct kvm *kvm = vcpu->kvm;
@@ -1324,6 +1367,15 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
 	if (kvm->arch.rma_setup_done)
 		goto out;	/* another vcpu beat us to it */
 
+	/* Allocate hashed page table (if not done already) and reset it */
+	if (!kvm->arch.hpt_virt) {
+		err = kvmppc_alloc_hpt(kvm, NULL);
+		if (err) {
+			pr_err("KVM: Couldn't alloc HPT\n");
+			goto out;
+		}
+	}
+
 	/* Look up the memslot for guest physical address 0 */
 	memslot = gfn_to_memslot(kvm, 0);
 
@@ -1435,13 +1487,14 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
-	long r;
-	unsigned long lpcr;
+	unsigned long lpcr, lpid;
 
-	/* Allocate hashed page table */
-	r = kvmppc_alloc_hpt(kvm);
-	if (r)
-		return r;
+	/* Allocate the guest's logical partition ID */
+
+	lpid = kvmppc_alloc_lpid();
+	if (lpid < 0)
+		return -ENOMEM;
+	kvm->arch.lpid = lpid;
 
 	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
 
@@ -1451,7 +1504,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 
 	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
 		/* PPC970; HID4 is effectively the LPCR */
-		unsigned long lpid = kvm->arch.lpid;
 		kvm->arch.host_lpid = 0;
 		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
 		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index e1b60f56f2a1..fb4eac290fef 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -25,6 +25,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type);
 static struct kvmppc_linear_info *kvm_alloc_linear(int type);
 static void kvm_release_linear(struct kvmppc_linear_info *ri);
 
+int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER;
+EXPORT_SYMBOL_GPL(kvm_hpt_order);
+
 /*************** RMA *************/
 
 /*
@@ -209,7 +212,7 @@ static void kvm_release_linear(struct kvmppc_linear_info *ri)
 void __init kvm_linear_init(void)
 {
 	/* HPT */
-	kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT);
+	kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT);
 
 	/* RMA */
 	/* Only do this on PPC970 in HV mode */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index cec4daddbf31..5c70d19494f9 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -237,7 +237,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	/* Find and lock the HPTEG slot to use */
  do_insert:
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 	if (likely((flags & H_EXACT) == 0)) {
 		pte_index &= ~7UL;
@@ -352,7 +352,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 	unsigned long v, r, rb;
 	struct revmap_entry *rev;
 
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -419,7 +419,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			i = 4;
 			break;
 		}
-		if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
+		if (req != 1 || flags == 3 ||
+		    pte_index >= kvm->arch.hpt_npte) {
 			/* parameter error */
 			args[j] = ((0xa0 | flags) << 56) + pte_index;
 			ret = H_PARAMETER;
@@ -521,7 +522,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	struct revmap_entry *rev;
 	unsigned long v, r, rb, mask, bits;
 
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
@@ -583,7 +584,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 	int i, n = 1;
 	struct revmap_entry *rev = NULL;
 
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 	if (flags & H_READ_4) {
 		pte_index &= ~3;
@@ -678,7 +679,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		somask = (1UL << 28) - 1;
 		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
 	}
-	hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
+	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
 	avpn = slb_v & ~(somask >> 16);	/* also includes B */
 	avpn |= (eaddr & somask) >> 16;
 
@@ -723,7 +724,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		if (val & HPTE_V_SECONDARY)
 			break;
 		val |= HPTE_V_SECONDARY;
-		hash = hash ^ HPT_HASH_MASK;
+		hash = hash ^ kvm->arch.hpt_mask;
 	}
 	return -1;
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a12..5a84c8d3d040 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -72,9 +72,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
 	mtsrr1	r6
 	RFI
 
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
-
 /******************************************************************************
  *                                                                            *
  *                                 Entry code                                 *
@@ -206,24 +203,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/* Load up FP, VMX and VSX registers */
 	bl	kvmppc_load_fp
 
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
 
 BEGIN_FTR_SECTION
 	/* Switch DSCR to guest value */
@@ -547,21 +544,21 @@ fast_guest_return:
 	mtlr	r5
 	mtcr	r6
 
-	ld	r0, VCPU_GPR(r0)(r4)
-	ld	r1, VCPU_GPR(r1)(r4)
-	ld	r2, VCPU_GPR(r2)(r4)
-	ld	r3, VCPU_GPR(r3)(r4)
-	ld	r5, VCPU_GPR(r5)(r4)
-	ld	r6, VCPU_GPR(r6)(r4)
-	ld	r7, VCPU_GPR(r7)(r4)
-	ld	r8, VCPU_GPR(r8)(r4)
-	ld	r9, VCPU_GPR(r9)(r4)
-	ld	r10, VCPU_GPR(r10)(r4)
-	ld	r11, VCPU_GPR(r11)(r4)
-	ld	r12, VCPU_GPR(r12)(r4)
-	ld	r13, VCPU_GPR(r13)(r4)
+	ld	r0, VCPU_GPR(R0)(r4)
+	ld	r1, VCPU_GPR(R1)(r4)
+	ld	r2, VCPU_GPR(R2)(r4)
+	ld	r3, VCPU_GPR(R3)(r4)
+	ld	r5, VCPU_GPR(R5)(r4)
+	ld	r6, VCPU_GPR(R6)(r4)
+	ld	r7, VCPU_GPR(R7)(r4)
+	ld	r8, VCPU_GPR(R8)(r4)
+	ld	r9, VCPU_GPR(R9)(r4)
+	ld	r10, VCPU_GPR(R10)(r4)
+	ld	r11, VCPU_GPR(R11)(r4)
+	ld	r12, VCPU_GPR(R12)(r4)
+	ld	r13, VCPU_GPR(R13)(r4)
 
-	ld	r4, VCPU_GPR(r4)(r4)
+	ld	r4, VCPU_GPR(R4)(r4)
 
 	hrfid
 	b	.
@@ -590,22 +587,22 @@ kvmppc_interrupt:
 
 	/* Save registers */
 
-	std	r0, VCPU_GPR(r0)(r9)
-	std	r1, VCPU_GPR(r1)(r9)
-	std	r2, VCPU_GPR(r2)(r9)
-	std	r3, VCPU_GPR(r3)(r9)
-	std	r4, VCPU_GPR(r4)(r9)
-	std	r5, VCPU_GPR(r5)(r9)
-	std	r6, VCPU_GPR(r6)(r9)
-	std	r7, VCPU_GPR(r7)(r9)
-	std	r8, VCPU_GPR(r8)(r9)
+	std	r0, VCPU_GPR(R0)(r9)
+	std	r1, VCPU_GPR(R1)(r9)
+	std	r2, VCPU_GPR(R2)(r9)
+	std	r3, VCPU_GPR(R3)(r9)
+	std	r4, VCPU_GPR(R4)(r9)
+	std	r5, VCPU_GPR(R5)(r9)
+	std	r6, VCPU_GPR(R6)(r9)
+	std	r7, VCPU_GPR(R7)(r9)
+	std	r8, VCPU_GPR(R8)(r9)
 	ld	r0, HSTATE_HOST_R2(r13)
-	std	r0, VCPU_GPR(r9)(r9)
-	std	r10, VCPU_GPR(r10)(r9)
-	std	r11, VCPU_GPR(r11)(r9)
+	std	r0, VCPU_GPR(R9)(r9)
+	std	r10, VCPU_GPR(R10)(r9)
+	std	r11, VCPU_GPR(R11)(r9)
 	ld	r3, HSTATE_SCRATCH0(r13)
 	lwz	r4, HSTATE_SCRATCH1(r13)
-	std	r3, VCPU_GPR(r12)(r9)
+	std	r3, VCPU_GPR(R12)(r9)
 	stw	r4, VCPU_CR(r9)
 
 	/* Restore R1/R2 so we can handle faults */
@@ -626,7 +623,7 @@ kvmppc_interrupt:
 
 	GET_SCRATCH0(r3)
 	mflr	r4
-	std	r3, VCPU_GPR(r13)(r9)
+	std	r3, VCPU_GPR(R13)(r9)
 	std	r4, VCPU_LR(r9)
 
 	/* Unset guest mode */
@@ -810,7 +807,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	lwz	r3,VCORE_NAPPING_THREADS(r5)
 	lwz	r4,VCPU_PTID(r9)
 	li	r0,1
-	sldi	r0,r0,r4
+	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
@@ -968,24 +965,24 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(r14)(r9)
-	std	r15, VCPU_GPR(r15)(r9)
-	std	r16, VCPU_GPR(r16)(r9)
-	std	r17, VCPU_GPR(r17)(r9)
-	std	r18, VCPU_GPR(r18)(r9)
-	std	r19, VCPU_GPR(r19)(r9)
-	std	r20, VCPU_GPR(r20)(r9)
-	std	r21, VCPU_GPR(r21)(r9)
-	std	r22, VCPU_GPR(r22)(r9)
-	std	r23, VCPU_GPR(r23)(r9)
-	std	r24, VCPU_GPR(r24)(r9)
-	std	r25, VCPU_GPR(r25)(r9)
-	std	r26, VCPU_GPR(r26)(r9)
-	std	r27, VCPU_GPR(r27)(r9)
-	std	r28, VCPU_GPR(r28)(r9)
-	std	r29, VCPU_GPR(r29)(r9)
-	std	r30, VCPU_GPR(r30)(r9)
-	std	r31, VCPU_GPR(r31)(r9)
+	std	r14, VCPU_GPR(R14)(r9)
+	std	r15, VCPU_GPR(R15)(r9)
+	std	r16, VCPU_GPR(R16)(r9)
+	std	r17, VCPU_GPR(R17)(r9)
+	std	r18, VCPU_GPR(R18)(r9)
+	std	r19, VCPU_GPR(R19)(r9)
+	std	r20, VCPU_GPR(R20)(r9)
+	std	r21, VCPU_GPR(R21)(r9)
+	std	r22, VCPU_GPR(R22)(r9)
+	std	r23, VCPU_GPR(R23)(r9)
+	std	r24, VCPU_GPR(R24)(r9)
+	std	r25, VCPU_GPR(R25)(r9)
+	std	r26, VCPU_GPR(R26)(r9)
+	std	r27, VCPU_GPR(R27)(r9)
+	std	r28, VCPU_GPR(R28)(r9)
+	std	r29, VCPU_GPR(R29)(r9)
+	std	r30, VCPU_GPR(R30)(r9)
+	std	r31, VCPU_GPR(R31)(r9)
 
 	/* Save SPRGs */
 	mfspr	r3, SPRN_SPRG0
@@ -1067,6 +1064,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mtspr	SPRN_DABR,r5
 	mtspr	SPRN_DABRX,r6
 
+	/* Restore SPRG3 */
+	ld	r3,HSTATE_SPRG3(r13)
+	mtspr	SPRN_SPRG3,r3
+
 	/*
 	 * Reload DEC.  HDEC interrupts were disabled when
 	 * we reloaded the host's LPCR value.
@@ -1160,7 +1161,7 @@ kvmppc_hdsi:
 	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
 	beq	3f
 	clrrdi	r0, r4, 28
-	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	bne	1f			/* if no SLB entry found */
 4:	std	r4, VCPU_FAULT_DAR(r9)
 	stw	r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1235,7 @@ kvmppc_hisi:
 	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
 	beq	3f
 	clrrdi	r0, r10, 28
-	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	bne	1f			/* if no SLB entry found */
 4:
 	/* Search the hash table. */
@@ -1278,7 +1279,7 @@ kvmppc_hisi:
 	 */
 	.globl	hcall_try_real_mode
 hcall_try_real_mode:
-	ld	r3,VCPU_GPR(r3)(r9)
+	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
 	bne	hcall_real_cont
 	clrrdi	r3,r3,2
@@ -1291,12 +1292,12 @@ hcall_try_real_mode:
 	add	r3,r3,r4
 	mtctr	r3
 	mr	r3,r9		/* get vcpu pointer */
-	ld	r4,VCPU_GPR(r4)(r9)
+	ld	r4,VCPU_GPR(R4)(r9)
 	bctrl
 	cmpdi	r3,H_TOO_HARD
 	beq	hcall_real_fallback
 	ld	r4,HSTATE_KVM_VCPU(r13)
-	std	r3,VCPU_GPR(r3)(r4)
+	std	r3,VCPU_GPR(R3)(r4)
 	ld	r10,VCPU_PC(r4)
 	ld	r11,VCPU_MSR(r4)
 	b	fast_guest_return
@@ -1424,7 +1425,7 @@ _GLOBAL(kvmppc_h_cede)
 	li	r0,0		/* set trap to 0 to say hcall is handled */
 	stw	r0,VCPU_TRAP(r3)
 	li	r0,H_SUCCESS
-	std	r0,VCPU_GPR(r3)(r3)
+	std	r0,VCPU_GPR(R3)(r3)
 BEGIN_FTR_SECTION
 	b	2f		/* just send it up to host on 970 */
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1444,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	addi	r6,r5,VCORE_NAPPING_THREADS
 31:	lwarx	r4,0,r6
 	or	r4,r4,r0
-	PPC_POPCNTW(r7,r4)
+	PPC_POPCNTW(R7,R4)
 	cmpw	r7,r8
 	bge	2f
 	stwcx.	r4,0,r6
@@ -1464,24 +1465,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 	 */
 	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(r14)(r3)
-	std	r15, VCPU_GPR(r15)(r3)
-	std	r16, VCPU_GPR(r16)(r3)
-	std	r17, VCPU_GPR(r17)(r3)
-	std	r18, VCPU_GPR(r18)(r3)
-	std	r19, VCPU_GPR(r19)(r3)
-	std	r20, VCPU_GPR(r20)(r3)
-	std	r21, VCPU_GPR(r21)(r3)
-	std	r22, VCPU_GPR(r22)(r3)
-	std	r23, VCPU_GPR(r23)(r3)
-	std	r24, VCPU_GPR(r24)(r3)
-	std	r25, VCPU_GPR(r25)(r3)
-	std	r26, VCPU_GPR(r26)(r3)
-	std	r27, VCPU_GPR(r27)(r3)
-	std	r28, VCPU_GPR(r28)(r3)
-	std	r29, VCPU_GPR(r29)(r3)
-	std	r30, VCPU_GPR(r30)(r3)
-	std	r31, VCPU_GPR(r31)(r3)
+	std	r14, VCPU_GPR(R14)(r3)
+	std	r15, VCPU_GPR(R15)(r3)
+	std	r16, VCPU_GPR(R16)(r3)
+	std	r17, VCPU_GPR(R17)(r3)
+	std	r18, VCPU_GPR(R18)(r3)
+	std	r19, VCPU_GPR(R19)(r3)
+	std	r20, VCPU_GPR(R20)(r3)
+	std	r21, VCPU_GPR(R21)(r3)
+	std	r22, VCPU_GPR(R22)(r3)
+	std	r23, VCPU_GPR(R23)(r3)
+	std	r24, VCPU_GPR(R24)(r3)
+	std	r25, VCPU_GPR(R25)(r3)
+	std	r26, VCPU_GPR(R26)(r3)
+	std	r27, VCPU_GPR(R27)(r3)
+	std	r28, VCPU_GPR(R28)(r3)
+	std	r29, VCPU_GPR(R29)(r3)
+	std	r30, VCPU_GPR(R30)(r3)
+	std	r31, VCPU_GPR(R31)(r3)
 
 	/* save FP state */
 	bl	.kvmppc_save_fp
@@ -1513,24 +1514,24 @@ kvm_end_cede:
 	bl	kvmppc_load_fp
 
 	/* Load NV GPRS */
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
 
 	/* clear our bit in vcore->napping_threads */
 33:	ld	r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1650,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
 	li	r6,reg*16+VCPU_VSRS
-	STXVD2X(reg,r6,r3)
+	STXVD2X(reg,R6,R3)
 	reg = reg + 1
 	.endr
 FTR_SECTION_ELSE
@@ -1711,7 +1712,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
 	li	r7,reg*16+VCPU_VSRS
-	LXVD2X(reg,r7,r4)
+	LXVD2X(reg,R7,R4)
 	reg = reg + 1
 	.endr
 FTR_SECTION_ELSE
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3e35383bdb21..48cbbf862958 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,38 +25,30 @@
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
-
-#define ULONG_SIZE 8
 #define FUNC(name) GLUE(.,name)
-
 #elif defined(CONFIG_PPC_BOOK3S_32)
-
-#define ULONG_SIZE 4
 #define FUNC(name) name
-
 #endif /* CONFIG_PPC_BOOK3S_XX */
 
-
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
 #define VCPU_LOAD_NVGPRS(vcpu) \
-	PPC_LL	r14, VCPU_GPR(r14)(vcpu); \
-	PPC_LL	r15, VCPU_GPR(r15)(vcpu); \
-	PPC_LL	r16, VCPU_GPR(r16)(vcpu); \
-	PPC_LL	r17, VCPU_GPR(r17)(vcpu); \
-	PPC_LL	r18, VCPU_GPR(r18)(vcpu); \
-	PPC_LL	r19, VCPU_GPR(r19)(vcpu); \
-	PPC_LL	r20, VCPU_GPR(r20)(vcpu); \
-	PPC_LL	r21, VCPU_GPR(r21)(vcpu); \
-	PPC_LL	r22, VCPU_GPR(r22)(vcpu); \
-	PPC_LL	r23, VCPU_GPR(r23)(vcpu); \
-	PPC_LL	r24, VCPU_GPR(r24)(vcpu); \
-	PPC_LL	r25, VCPU_GPR(r25)(vcpu); \
-	PPC_LL	r26, VCPU_GPR(r26)(vcpu); \
-	PPC_LL	r27, VCPU_GPR(r27)(vcpu); \
-	PPC_LL	r28, VCPU_GPR(r28)(vcpu); \
-	PPC_LL	r29, VCPU_GPR(r29)(vcpu); \
-	PPC_LL	r30, VCPU_GPR(r30)(vcpu); \
-	PPC_LL	r31, VCPU_GPR(r31)(vcpu); \
+	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
+	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
+	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
+	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
+	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
+	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
+	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
+	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
+	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
+	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
+	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
+	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
+	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
+	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
+	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
+	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
+	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
+	PPC_LL	r31, VCPU_GPR(R31)(vcpu); \
 
 /*****************************************************************************
  *                                                                           *
@@ -131,24 +123,24 @@ kvmppc_handler_highmem:
 	/* R7 = vcpu */
 	PPC_LL	r7, GPR4(r1)
 
-	PPC_STL	r14, VCPU_GPR(r14)(r7)
-	PPC_STL	r15, VCPU_GPR(r15)(r7)
-	PPC_STL	r16, VCPU_GPR(r16)(r7)
-	PPC_STL	r17, VCPU_GPR(r17)(r7)
-	PPC_STL	r18, VCPU_GPR(r18)(r7)
-	PPC_STL	r19, VCPU_GPR(r19)(r7)
-	PPC_STL	r20, VCPU_GPR(r20)(r7)
-	PPC_STL	r21, VCPU_GPR(r21)(r7)
-	PPC_STL	r22, VCPU_GPR(r22)(r7)
-	PPC_STL	r23, VCPU_GPR(r23)(r7)
-	PPC_STL	r24, VCPU_GPR(r24)(r7)
-	PPC_STL	r25, VCPU_GPR(r25)(r7)
-	PPC_STL	r26, VCPU_GPR(r26)(r7)
-	PPC_STL	r27, VCPU_GPR(r27)(r7)
-	PPC_STL	r28, VCPU_GPR(r28)(r7)
-	PPC_STL	r29, VCPU_GPR(r29)(r7)
-	PPC_STL	r30, VCPU_GPR(r30)(r7)
-	PPC_STL	r31, VCPU_GPR(r31)(r7)
+	PPC_STL	r14, VCPU_GPR(R14)(r7)
+	PPC_STL	r15, VCPU_GPR(R15)(r7)
+	PPC_STL	r16, VCPU_GPR(R16)(r7)
+	PPC_STL	r17, VCPU_GPR(R17)(r7)
+	PPC_STL	r18, VCPU_GPR(R18)(r7)
+	PPC_STL	r19, VCPU_GPR(R19)(r7)
+	PPC_STL	r20, VCPU_GPR(R20)(r7)
+	PPC_STL	r21, VCPU_GPR(R21)(r7)
+	PPC_STL	r22, VCPU_GPR(R22)(r7)
+	PPC_STL	r23, VCPU_GPR(R23)(r7)
+	PPC_STL	r24, VCPU_GPR(R24)(r7)
+	PPC_STL	r25, VCPU_GPR(R25)(r7)
+	PPC_STL	r26, VCPU_GPR(R26)(r7)
+	PPC_STL	r27, VCPU_GPR(R27)(r7)
+	PPC_STL	r28, VCPU_GPR(R28)(r7)
+	PPC_STL	r29, VCPU_GPR(R29)(r7)
+	PPC_STL	r30, VCPU_GPR(R30)(r7)
+	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	mr	r5, r12
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 3ff9013d6e79..ee02b30878ed 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_PUT_TCE:
 		return kvmppc_h_pr_put_tce(vcpu);
 	case H_CEDE:
+		vcpu->arch.shared->msr |= MSR_EE;
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		vcpu->stat.halt_wakeup++;
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34187585c507..ab523f3c1731 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -37,7 +37,6 @@
 #if defined(CONFIG_PPC_BOOK3S_64)
 
 #define FUNC(name) GLUE(.,name)
-#define MTMSR_EERI(reg) mtmsrd (reg),1
 
 	.globl	kvmppc_skip_interrupt
 kvmppc_skip_interrupt:
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 798491a268b3..1abe4788191a 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -23,7 +23,6 @@
 
 #define GET_SHADOW_VCPU(reg) \
 	mr	reg, r13
-#define MTMSR_EERI(reg) mtmsrd (reg),1
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -31,7 +30,6 @@
 	tophys(reg, r2); \
 	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
 	tophys(reg, reg)
-#define MTMSR_EERI(reg) mtmsr (reg)
 
 #endif
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 72f13f4a06e0..d25a097c852b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -612,6 +612,12 @@ static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 	regs->link = lr;
 }
 
+/*
+ * For interrupts needed to be handled by host interrupt handlers,
+ * corresponding host handler are called from here in similar way
+ * (but not exact) as they are called from low level handler
+ * (such as from arch/powerpc/kernel/head_fsl_booke.S).
+ */
 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 				     unsigned int exit_nr)
 {
@@ -639,6 +645,17 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 		kvmppc_fill_pt_regs(&regs);
 		performance_monitor_exception(&regs);
 		break;
+	case BOOKE_INTERRUPT_WATCHDOG:
+		kvmppc_fill_pt_regs(&regs);
+#ifdef CONFIG_BOOKE_WDT
+		WatchdogException(&regs);
+#else
+		unknown_exception(&regs);
+#endif
+		break;
+	case BOOKE_INTERRUPT_CRITICAL:
+		unknown_exception(&regs);
+		break;
 	}
 }
 
@@ -683,6 +700,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
+	case BOOKE_INTERRUPT_WATCHDOG:
+		r = RESUME_GUEST;
+		break;
+
 	case BOOKE_INTERRUPT_DOORBELL:
 		kvmppc_account_exit(vcpu, DBELL_EXITS);
 		r = RESUME_GUEST;
@@ -1267,6 +1288,11 @@ void kvmppc_decrementer_func(unsigned long data)
 {
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 
+	if (vcpu->arch.tcr & TCR_ARE) {
+		vcpu->arch.dec = vcpu->arch.decar;
+		kvmppc_emulate_dec(vcpu);
+	}
+
 	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
 }
 
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 6c76397f2af4..12834bb608ab 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -24,6 +24,7 @@
24#include "booke.h" 24#include "booke.h"
25 25
26#define OP_19_XOP_RFI 50 26#define OP_19_XOP_RFI 50
27#define OP_19_XOP_RFCI 51
27 28
28#define OP_31_XOP_MFMSR 83 29#define OP_31_XOP_MFMSR 83
29#define OP_31_XOP_WRTEE 131 30#define OP_31_XOP_WRTEE 131
@@ -36,6 +37,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
 }
 
+static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pc = vcpu->arch.csrr0;
+	kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
+}
+
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance)
 {
@@ -52,6 +59,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			*advance = 0;
 			break;
 
+		case OP_19_XOP_RFCI:
+			kvmppc_emul_rfci(vcpu);
+			kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
+			*advance = 0;
+			break;
+
 		default:
 			emulated = EMULATE_FAIL;
 			break;
@@ -113,6 +126,12 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	case SPRN_ESR:
 		vcpu->arch.shared->esr = spr_val;
 		break;
+	case SPRN_CSRR0:
+		vcpu->arch.csrr0 = spr_val;
+		break;
+	case SPRN_CSRR1:
+		vcpu->arch.csrr1 = spr_val;
+		break;
 	case SPRN_DBCR0:
 		vcpu->arch.dbcr0 = spr_val;
 		break;
@@ -129,6 +148,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 		kvmppc_set_tcr(vcpu, spr_val);
 		break;
 
+	case SPRN_DECAR:
+		vcpu->arch.decar = spr_val;
+		break;
 	/*
 	 * Note: SPRG4-7 are user-readable.
 	 * These values are loaded into the real SPRGs when resuming the
@@ -229,6 +251,12 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 	case SPRN_ESR:
 		*spr_val = vcpu->arch.shared->esr;
 		break;
+	case SPRN_CSRR0:
+		*spr_val = vcpu->arch.csrr0;
+		break;
+	case SPRN_CSRR1:
+		*spr_val = vcpu->arch.csrr1;
+		break;
 	case SPRN_DBCR0:
 		*spr_val = vcpu->arch.dbcr0;
 		break;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8feec2ff3928..bb46b32f9813 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -25,8 +25,6 @@
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
-#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
-
 /* The host stack layout: */
 #define HOST_R1 0 /* Implied by stwu. */
 #define HOST_CALLEE_LR 4
@@ -36,8 +34,9 @@
 #define HOST_R2 12
 #define HOST_CR 16
 #define HOST_NV_GPRS 20
-#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
-#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
 #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
 
@@ -53,16 +52,21 @@
 	 (1<<BOOKE_INTERRUPT_PROGRAM) | \
 	 (1<<BOOKE_INTERRUPT_DTLB_MISS))
 
-.macro KVM_HANDLER ivor_nr
+.macro KVM_HANDLER ivor_nr scratch srr0
 _GLOBAL(kvmppc_handler_\ivor_nr)
 	/* Get pointer to vcpu and record exit number. */
-	mtspr	SPRN_SPRG_WSCRATCH0, r4
+	mtspr	\scratch , r4
 	mfspr	r4, SPRN_SPRG_RVCPU
-	stw	r5, VCPU_GPR(r5)(r4)
-	stw	r6, VCPU_GPR(r6)(r4)
+	stw	r3, VCPU_GPR(R3)(r4)
+	stw	r5, VCPU_GPR(R5)(r4)
+	stw	r6, VCPU_GPR(R6)(r4)
+	mfspr	r3, \scratch
 	mfctr	r5
-	lis	r6, kvmppc_resume_host@h
+	stw	r3, VCPU_GPR(R4)(r4)
 	stw	r5, VCPU_CTR(r4)
+	mfspr	r3, \srr0
+	lis	r6, kvmppc_resume_host@h
+	stw	r3, VCPU_PC(r4)
 	li	r5, \ivor_nr
 	ori	r6, r6, kvmppc_resume_host@l
 	mtctr	r6
@@ -70,42 +74,40 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
 .endm
 
 _GLOBAL(kvmppc_handlers_start)
-KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
-KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
-KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
-KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
-KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
-KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
-KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
-KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
-KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
-KVM_HANDLER BOOKE_INTERRUPT_FIT
-KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
-KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
-KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
-KVM_HANDLER BOOKE_INTERRUPT_DEBUG
-KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
-KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND
+KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
+KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 
 _GLOBAL(kvmppc_handler_len)
 	.long kvmppc_handler_1 - kvmppc_handler_0
 
-
 /* Registers:
  *  SPRG_SCRATCH0: guest r4
  *  r4: vcpu pointer
  *  r5: KVM exit number
  */
 _GLOBAL(kvmppc_resume_host)
-	stw	r3, VCPU_GPR(r3)(r4)
 	mfcr	r3
 	stw	r3, VCPU_CR(r4)
-	stw	r7, VCPU_GPR(r7)(r4)
-	stw	r8, VCPU_GPR(r8)(r4)
-	stw	r9, VCPU_GPR(r9)(r4)
+	stw	r7, VCPU_GPR(R7)(r4)
+	stw	r8, VCPU_GPR(R8)(r4)
+	stw	r9, VCPU_GPR(R9)(r4)
 
 	li	r6, 1
 	slw	r6, r6, r5
@@ -135,23 +137,23 @@ _GLOBAL(kvmppc_resume_host)
 	isync
 	stw	r9, VCPU_LAST_INST(r4)
 
-	stw	r15, VCPU_GPR(r15)(r4)
-	stw	r16, VCPU_GPR(r16)(r4)
-	stw	r17, VCPU_GPR(r17)(r4)
-	stw	r18, VCPU_GPR(r18)(r4)
-	stw	r19, VCPU_GPR(r19)(r4)
-	stw	r20, VCPU_GPR(r20)(r4)
-	stw	r21, VCPU_GPR(r21)(r4)
-	stw	r22, VCPU_GPR(r22)(r4)
-	stw	r23, VCPU_GPR(r23)(r4)
-	stw	r24, VCPU_GPR(r24)(r4)
-	stw	r25, VCPU_GPR(r25)(r4)
-	stw	r26, VCPU_GPR(r26)(r4)
-	stw	r27, VCPU_GPR(r27)(r4)
-	stw	r28, VCPU_GPR(r28)(r4)
-	stw	r29, VCPU_GPR(r29)(r4)
-	stw	r30, VCPU_GPR(r30)(r4)
-	stw	r31, VCPU_GPR(r31)(r4)
+	stw	r15, VCPU_GPR(R15)(r4)
+	stw	r16, VCPU_GPR(R16)(r4)
+	stw	r17, VCPU_GPR(R17)(r4)
+	stw	r18, VCPU_GPR(R18)(r4)
+	stw	r19, VCPU_GPR(R19)(r4)
+	stw	r20, VCPU_GPR(R20)(r4)
+	stw	r21, VCPU_GPR(R21)(r4)
+	stw	r22, VCPU_GPR(R22)(r4)
+	stw	r23, VCPU_GPR(R23)(r4)
+	stw	r24, VCPU_GPR(R24)(r4)
+	stw	r25, VCPU_GPR(R25)(r4)
+	stw	r26, VCPU_GPR(R26)(r4)
+	stw	r27, VCPU_GPR(R27)(r4)
+	stw	r28, VCPU_GPR(R28)(r4)
+	stw	r29, VCPU_GPR(R29)(r4)
+	stw	r30, VCPU_GPR(R30)(r4)
+	stw	r31, VCPU_GPR(R31)(r4)
 ..skip_inst_copy:
 
 	/* Also grab DEAR and ESR before the host can clobber them. */
@@ -169,22 +171,18 @@ _GLOBAL(kvmppc_resume_host)
 ..skip_esr:
 
 	/* Save remaining volatile guest register state to vcpu. */
-	stw	r0, VCPU_GPR(r0)(r4)
-	stw	r1, VCPU_GPR(r1)(r4)
-	stw	r2, VCPU_GPR(r2)(r4)
-	stw	r10, VCPU_GPR(r10)(r4)
-	stw	r11, VCPU_GPR(r11)(r4)
-	stw	r12, VCPU_GPR(r12)(r4)
-	stw	r13, VCPU_GPR(r13)(r4)
-	stw	r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+	stw	r0, VCPU_GPR(R0)(r4)
+	stw	r1, VCPU_GPR(R1)(r4)
+	stw	r2, VCPU_GPR(R2)(r4)
+	stw	r10, VCPU_GPR(R10)(r4)
+	stw	r11, VCPU_GPR(R11)(r4)
+	stw	r12, VCPU_GPR(R12)(r4)
+	stw	r13, VCPU_GPR(R13)(r4)
+	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
 	mflr	r3
 	stw	r3, VCPU_LR(r4)
 	mfxer	r3
 	stw	r3, VCPU_XER(r4)
-	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	stw	r3, VCPU_GPR(r4)(r4)
-	mfspr	r3, SPRN_SRR0
-	stw	r3, VCPU_PC(r4)
 
 	/* Restore host stack pointer and PID before IVPR, since the host
 	 * exception handlers use them. */
@@ -214,28 +212,28 @@ _GLOBAL(kvmppc_resume_host)
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr	r4, r14
-	lwz	r14, VCPU_GPR(r14)(r4)
+	lwz	r14, VCPU_GPR(R14)(r4)
 
 	/* Sometimes instruction emulation must restore complete GPR state. */
 	andi.	r5, r3, RESUME_FLAG_NV
 	beq	..skip_nv_load
-	lwz	r15, VCPU_GPR(r15)(r4)
-	lwz	r16, VCPU_GPR(r16)(r4)
-	lwz	r17, VCPU_GPR(r17)(r4)
-	lwz	r18, VCPU_GPR(r18)(r4)
-	lwz	r19, VCPU_GPR(r19)(r4)
-	lwz	r20, VCPU_GPR(r20)(r4)
-	lwz	r21, VCPU_GPR(r21)(r4)
-	lwz	r22, VCPU_GPR(r22)(r4)
-	lwz	r23, VCPU_GPR(r23)(r4)
-	lwz	r24, VCPU_GPR(r24)(r4)
-	lwz	r25, VCPU_GPR(r25)(r4)
-	lwz	r26, VCPU_GPR(r26)(r4)
-	lwz	r27, VCPU_GPR(r27)(r4)
-	lwz	r28, VCPU_GPR(r28)(r4)
-	lwz	r29, VCPU_GPR(r29)(r4)
-	lwz	r30, VCPU_GPR(r30)(r4)
-	lwz	r31, VCPU_GPR(r31)(r4)
+	lwz	r15, VCPU_GPR(R15)(r4)
+	lwz	r16, VCPU_GPR(R16)(r4)
+	lwz	r17, VCPU_GPR(R17)(r4)
+	lwz	r18, VCPU_GPR(R18)(r4)
+	lwz	r19, VCPU_GPR(R19)(r4)
+	lwz	r20, VCPU_GPR(R20)(r4)
+	lwz	r21, VCPU_GPR(R21)(r4)
+	lwz	r22, VCPU_GPR(R22)(r4)
+	lwz	r23, VCPU_GPR(R23)(r4)
+	lwz	r24, VCPU_GPR(R24)(r4)
+	lwz	r25, VCPU_GPR(R25)(r4)
+	lwz	r26, VCPU_GPR(R26)(r4)
+	lwz	r27, VCPU_GPR(R27)(r4)
+	lwz	r28, VCPU_GPR(R28)(r4)
+	lwz	r29, VCPU_GPR(R29)(r4)
+	lwz	r30, VCPU_GPR(R30)(r4)
+	lwz	r31, VCPU_GPR(R31)(r4)
 ..skip_nv_load:
 
 	/* Should we return to the guest? */
@@ -257,43 +255,43 @@ heavyweight_exit:
257 255
258 /* We already saved guest volatile register state; now save the 256 /* We already saved guest volatile register state; now save the
259 * non-volatiles. */ 257 * non-volatiles. */
260 stw r15, VCPU_GPR(r15)(r4) 258 stw r15, VCPU_GPR(R15)(r4)
261 stw r16, VCPU_GPR(r16)(r4) 259 stw r16, VCPU_GPR(R16)(r4)
262 stw r17, VCPU_GPR(r17)(r4) 260 stw r17, VCPU_GPR(R17)(r4)
263 stw r18, VCPU_GPR(r18)(r4) 261 stw r18, VCPU_GPR(R18)(r4)
264 stw r19, VCPU_GPR(r19)(r4) 262 stw r19, VCPU_GPR(R19)(r4)
265 stw r20, VCPU_GPR(r20)(r4) 263 stw r20, VCPU_GPR(R20)(r4)
266 stw r21, VCPU_GPR(r21)(r4) 264 stw r21, VCPU_GPR(R21)(r4)
267 stw r22, VCPU_GPR(r22)(r4) 265 stw r22, VCPU_GPR(R22)(r4)
268 stw r23, VCPU_GPR(r23)(r4) 266 stw r23, VCPU_GPR(R23)(r4)
269 stw r24, VCPU_GPR(r24)(r4) 267 stw r24, VCPU_GPR(R24)(r4)
270 stw r25, VCPU_GPR(r25)(r4) 268 stw r25, VCPU_GPR(R25)(r4)
271 stw r26, VCPU_GPR(r26)(r4) 269 stw r26, VCPU_GPR(R26)(r4)
272 stw r27, VCPU_GPR(r27)(r4) 270 stw r27, VCPU_GPR(R27)(r4)
273 stw r28, VCPU_GPR(r28)(r4) 271 stw r28, VCPU_GPR(R28)(r4)
274 stw r29, VCPU_GPR(r29)(r4) 272 stw r29, VCPU_GPR(R29)(r4)
275 stw r30, VCPU_GPR(r30)(r4) 273 stw r30, VCPU_GPR(R30)(r4)
276 stw r31, VCPU_GPR(r31)(r4) 274 stw r31, VCPU_GPR(R31)(r4)
277 275
278 /* Load host non-volatile register state from host stack. */ 276 /* Load host non-volatile register state from host stack. */
279 lwz r14, HOST_NV_GPR(r14)(r1) 277 lwz r14, HOST_NV_GPR(R14)(r1)
280 lwz r15, HOST_NV_GPR(r15)(r1) 278 lwz r15, HOST_NV_GPR(R15)(r1)
281 lwz r16, HOST_NV_GPR(r16)(r1) 279 lwz r16, HOST_NV_GPR(R16)(r1)
282 lwz r17, HOST_NV_GPR(r17)(r1) 280 lwz r17, HOST_NV_GPR(R17)(r1)
283 lwz r18, HOST_NV_GPR(r18)(r1) 281 lwz r18, HOST_NV_GPR(R18)(r1)
284 lwz r19, HOST_NV_GPR(r19)(r1) 282 lwz r19, HOST_NV_GPR(R19)(r1)
285 lwz r20, HOST_NV_GPR(r20)(r1) 283 lwz r20, HOST_NV_GPR(R20)(r1)
286 lwz r21, HOST_NV_GPR(r21)(r1) 284 lwz r21, HOST_NV_GPR(R21)(r1)
287 lwz r22, HOST_NV_GPR(r22)(r1) 285 lwz r22, HOST_NV_GPR(R22)(r1)
288 lwz r23, HOST_NV_GPR(r23)(r1) 286 lwz r23, HOST_NV_GPR(R23)(r1)
289 lwz r24, HOST_NV_GPR(r24)(r1) 287 lwz r24, HOST_NV_GPR(R24)(r1)
290 lwz r25, HOST_NV_GPR(r25)(r1) 288 lwz r25, HOST_NV_GPR(R25)(r1)
291 lwz r26, HOST_NV_GPR(r26)(r1) 289 lwz r26, HOST_NV_GPR(R26)(r1)
292 lwz r27, HOST_NV_GPR(r27)(r1) 290 lwz r27, HOST_NV_GPR(R27)(r1)
293 lwz r28, HOST_NV_GPR(r28)(r1) 291 lwz r28, HOST_NV_GPR(R28)(r1)
294 lwz r29, HOST_NV_GPR(r29)(r1) 292 lwz r29, HOST_NV_GPR(R29)(r1)
295 lwz r30, HOST_NV_GPR(r30)(r1) 293 lwz r30, HOST_NV_GPR(R30)(r1)
296 lwz r31, HOST_NV_GPR(r31)(r1) 294 lwz r31, HOST_NV_GPR(R31)(r1)
297 295
298 /* Return to kvm_vcpu_run(). */ 296 /* Return to kvm_vcpu_run(). */
299 lwz r4, HOST_STACK_LR(r1) 297 lwz r4, HOST_STACK_LR(r1)
@@ -321,44 +319,44 @@ _GLOBAL(__kvmppc_vcpu_run)
321 stw r5, HOST_CR(r1) 319 stw r5, HOST_CR(r1)
322 320
323 /* Save host non-volatile register state to stack. */ 321 /* Save host non-volatile register state to stack. */
324 stw r14, HOST_NV_GPR(r14)(r1) 322 stw r14, HOST_NV_GPR(R14)(r1)
325 stw r15, HOST_NV_GPR(r15)(r1) 323 stw r15, HOST_NV_GPR(R15)(r1)
326 stw r16, HOST_NV_GPR(r16)(r1) 324 stw r16, HOST_NV_GPR(R16)(r1)
327 stw r17, HOST_NV_GPR(r17)(r1) 325 stw r17, HOST_NV_GPR(R17)(r1)
328 stw r18, HOST_NV_GPR(r18)(r1) 326 stw r18, HOST_NV_GPR(R18)(r1)
329 stw r19, HOST_NV_GPR(r19)(r1) 327 stw r19, HOST_NV_GPR(R19)(r1)
330 stw r20, HOST_NV_GPR(r20)(r1) 328 stw r20, HOST_NV_GPR(R20)(r1)
331 stw r21, HOST_NV_GPR(r21)(r1) 329 stw r21, HOST_NV_GPR(R21)(r1)
332 stw r22, HOST_NV_GPR(r22)(r1) 330 stw r22, HOST_NV_GPR(R22)(r1)
333 stw r23, HOST_NV_GPR(r23)(r1) 331 stw r23, HOST_NV_GPR(R23)(r1)
334 stw r24, HOST_NV_GPR(r24)(r1) 332 stw r24, HOST_NV_GPR(R24)(r1)
335 stw r25, HOST_NV_GPR(r25)(r1) 333 stw r25, HOST_NV_GPR(R25)(r1)
336 stw r26, HOST_NV_GPR(r26)(r1) 334 stw r26, HOST_NV_GPR(R26)(r1)
337 stw r27, HOST_NV_GPR(r27)(r1) 335 stw r27, HOST_NV_GPR(R27)(r1)
338 stw r28, HOST_NV_GPR(r28)(r1) 336 stw r28, HOST_NV_GPR(R28)(r1)
339 stw r29, HOST_NV_GPR(r29)(r1) 337 stw r29, HOST_NV_GPR(R29)(r1)
340 stw r30, HOST_NV_GPR(r30)(r1) 338 stw r30, HOST_NV_GPR(R30)(r1)
341 stw r31, HOST_NV_GPR(r31)(r1) 339 stw r31, HOST_NV_GPR(R31)(r1)
342 340
343 /* Load guest non-volatiles. */ 341 /* Load guest non-volatiles. */
344 lwz r14, VCPU_GPR(r14)(r4) 342 lwz r14, VCPU_GPR(R14)(r4)
345 lwz r15, VCPU_GPR(r15)(r4) 343 lwz r15, VCPU_GPR(R15)(r4)
346 lwz r16, VCPU_GPR(r16)(r4) 344 lwz r16, VCPU_GPR(R16)(r4)
347 lwz r17, VCPU_GPR(r17)(r4) 345 lwz r17, VCPU_GPR(R17)(r4)
348 lwz r18, VCPU_GPR(r18)(r4) 346 lwz r18, VCPU_GPR(R18)(r4)
349 lwz r19, VCPU_GPR(r19)(r4) 347 lwz r19, VCPU_GPR(R19)(r4)
350 lwz r20, VCPU_GPR(r20)(r4) 348 lwz r20, VCPU_GPR(R20)(r4)
351 lwz r21, VCPU_GPR(r21)(r4) 349 lwz r21, VCPU_GPR(R21)(r4)
352 lwz r22, VCPU_GPR(r22)(r4) 350 lwz r22, VCPU_GPR(R22)(r4)
353 lwz r23, VCPU_GPR(r23)(r4) 351 lwz r23, VCPU_GPR(R23)(r4)
354 lwz r24, VCPU_GPR(r24)(r4) 352 lwz r24, VCPU_GPR(R24)(r4)
355 lwz r25, VCPU_GPR(r25)(r4) 353 lwz r25, VCPU_GPR(R25)(r4)
356 lwz r26, VCPU_GPR(r26)(r4) 354 lwz r26, VCPU_GPR(R26)(r4)
357 lwz r27, VCPU_GPR(r27)(r4) 355 lwz r27, VCPU_GPR(R27)(r4)
358 lwz r28, VCPU_GPR(r28)(r4) 356 lwz r28, VCPU_GPR(R28)(r4)
359 lwz r29, VCPU_GPR(r29)(r4) 357 lwz r29, VCPU_GPR(R29)(r4)
360 lwz r30, VCPU_GPR(r30)(r4) 358 lwz r30, VCPU_GPR(R30)(r4)
361 lwz r31, VCPU_GPR(r31)(r4) 359 lwz r31, VCPU_GPR(R31)(r4)
362 360
363#ifdef CONFIG_SPE 361#ifdef CONFIG_SPE
364 /* save host SPEFSCR and load guest SPEFSCR */ 362 /* save host SPEFSCR and load guest SPEFSCR */
@@ -386,13 +384,13 @@ lightweight_exit:
386#endif 384#endif
387 385
388 /* Load some guest volatiles. */ 386 /* Load some guest volatiles. */
389 lwz r0, VCPU_GPR(r0)(r4) 387 lwz r0, VCPU_GPR(R0)(r4)
390 lwz r2, VCPU_GPR(r2)(r4) 388 lwz r2, VCPU_GPR(R2)(r4)
391 lwz r9, VCPU_GPR(r9)(r4) 389 lwz r9, VCPU_GPR(R9)(r4)
392 lwz r10, VCPU_GPR(r10)(r4) 390 lwz r10, VCPU_GPR(R10)(r4)
393 lwz r11, VCPU_GPR(r11)(r4) 391 lwz r11, VCPU_GPR(R11)(r4)
394 lwz r12, VCPU_GPR(r12)(r4) 392 lwz r12, VCPU_GPR(R12)(r4)
395 lwz r13, VCPU_GPR(r13)(r4) 393 lwz r13, VCPU_GPR(R13)(r4)
396 lwz r3, VCPU_LR(r4) 394 lwz r3, VCPU_LR(r4)
397 mtlr r3 395 mtlr r3
398 lwz r3, VCPU_XER(r4) 396 lwz r3, VCPU_XER(r4)
@@ -411,7 +409,7 @@ lightweight_exit:
411 409
412 /* Can't switch the stack pointer until after IVPR is switched, 410 /* Can't switch the stack pointer until after IVPR is switched,
413 * because host interrupt handlers would get confused. */ 411 * because host interrupt handlers would get confused. */
414 lwz r1, VCPU_GPR(r1)(r4) 412 lwz r1, VCPU_GPR(R1)(r4)
415 413
416 /* 414 /*
417 * Host interrupt handlers may have clobbered these 415 * Host interrupt handlers may have clobbered these
@@ -449,10 +447,10 @@ lightweight_exit:
449 mtcr r5 447 mtcr r5
450 mtsrr0 r6 448 mtsrr0 r6
451 mtsrr1 r7 449 mtsrr1 r7
452 lwz r5, VCPU_GPR(r5)(r4) 450 lwz r5, VCPU_GPR(R5)(r4)
453 lwz r6, VCPU_GPR(r6)(r4) 451 lwz r6, VCPU_GPR(R6)(r4)
454 lwz r7, VCPU_GPR(r7)(r4) 452 lwz r7, VCPU_GPR(R7)(r4)
455 lwz r8, VCPU_GPR(r8)(r4) 453 lwz r8, VCPU_GPR(R8)(r4)
456 454
457 /* Clear any debug events which occurred since we disabled MSR[DE]. 455 /* Clear any debug events which occurred since we disabled MSR[DE].
458 * XXX This gives us a 3-instruction window in which a breakpoint 456 * XXX This gives us a 3-instruction window in which a breakpoint
@@ -461,8 +459,8 @@ lightweight_exit:
461 ori r3, r3, 0xffff 459 ori r3, r3, 0xffff
462 mtspr SPRN_DBSR, r3 460 mtspr SPRN_DBSR, r3
463 461
464 lwz r3, VCPU_GPR(r3)(r4) 462 lwz r3, VCPU_GPR(R3)(r4)
465 lwz r4, VCPU_GPR(r4)(r4) 463 lwz r4, VCPU_GPR(R4)(r4)
466 rfi 464 rfi
467 465
468#ifdef CONFIG_SPE 466#ifdef CONFIG_SPE
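
Almost every hunk in this merge follows one pattern: the offset macros VCPU_GPR() and HOST_NV_GPR() are now passed the numeric constants R0..R31 instead of the assembler register names r0..r31. The reason is visible in the definition removed from bookehv_interrupts.S just below (VCPU_GPR(n) expands to VCPU_GPRS + n * LONGBYTES): the macro argument has to be a plain integer so the result is a compile-time byte offset. The snippet below is a self-contained sketch of that arithmetic only; VCPU_GPRS = 0x100 is a made-up value and the names are illustrative, not the kernel's headers.

#include <stdio.h>

#define LONGBYTES      (sizeof(long))              /* 8 on ppc64, 4 on ppc32 */
#define VCPU_GPRS      0x100                       /* hypothetical offset of the vcpu GPR array */
#define VCPU_GPR(n)    (VCPU_GPRS + (n) * LONGBYTES)

#define R14            14                          /* numeric register constant, not the asm name r14 */

int main(void)
{
        /* VCPU_GPR(R14) is a plain compile-time byte offset, which is exactly
         * what an instruction like "stw r14, VCPU_GPR(R14)(r4)" needs as its
         * displacement; the lower-case r14 token stays reserved for the
         * register operand itself. */
        printf("VCPU_GPR(R14) = %zu\n", VCPU_GPR(R14));
        return 0;
}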
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..d28c2d43ac1b 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -37,7 +37,6 @@
37 37
38#define LONGBYTES (BITS_PER_LONG / 8) 38#define LONGBYTES (BITS_PER_LONG / 8)
39 39
40#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
41#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) 40#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
42 41
43/* The host stack layout: */ 42/* The host stack layout: */
@@ -67,15 +66,15 @@
67 */ 66 */
68.macro kvm_handler_common intno, srr0, flags 67.macro kvm_handler_common intno, srr0, flags
69 /* Restore host stack pointer */ 68 /* Restore host stack pointer */
70 PPC_STL r1, VCPU_GPR(r1)(r4) 69 PPC_STL r1, VCPU_GPR(R1)(r4)
71 PPC_STL r2, VCPU_GPR(r2)(r4) 70 PPC_STL r2, VCPU_GPR(R2)(r4)
72 PPC_LL r1, VCPU_HOST_STACK(r4) 71 PPC_LL r1, VCPU_HOST_STACK(r4)
73 PPC_LL r2, HOST_R2(r1) 72 PPC_LL r2, HOST_R2(r1)
74 73
75 mfspr r10, SPRN_PID 74 mfspr r10, SPRN_PID
76 lwz r8, VCPU_HOST_PID(r4) 75 lwz r8, VCPU_HOST_PID(r4)
77 PPC_LL r11, VCPU_SHARED(r4) 76 PPC_LL r11, VCPU_SHARED(r4)
78 PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ 77 PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
79 li r14, \intno 78 li r14, \intno
80 79
81 stw r10, VCPU_GUEST_PID(r4) 80 stw r10, VCPU_GUEST_PID(r4)
@@ -137,35 +136,31 @@
137 */ 136 */
138 137
139 mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ 138 mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
140 PPC_STL r15, VCPU_GPR(r15)(r4) 139 PPC_STL r15, VCPU_GPR(R15)(r4)
141 PPC_STL r16, VCPU_GPR(r16)(r4) 140 PPC_STL r16, VCPU_GPR(R16)(r4)
142 PPC_STL r17, VCPU_GPR(r17)(r4) 141 PPC_STL r17, VCPU_GPR(R17)(r4)
143 PPC_STL r18, VCPU_GPR(r18)(r4) 142 PPC_STL r18, VCPU_GPR(R18)(r4)
144 PPC_STL r19, VCPU_GPR(r19)(r4) 143 PPC_STL r19, VCPU_GPR(R19)(r4)
145 mr r8, r3 144 mr r8, r3
146 PPC_STL r20, VCPU_GPR(r20)(r4) 145 PPC_STL r20, VCPU_GPR(R20)(r4)
147 rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS 146 rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
148 PPC_STL r21, VCPU_GPR(r21)(r4) 147 PPC_STL r21, VCPU_GPR(R21)(r4)
149 rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR 148 rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
150 PPC_STL r22, VCPU_GPR(r22)(r4) 149 PPC_STL r22, VCPU_GPR(R22)(r4)
151 rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID 150 rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
152 PPC_STL r23, VCPU_GPR(r23)(r4) 151 PPC_STL r23, VCPU_GPR(R23)(r4)
153 PPC_STL r24, VCPU_GPR(r24)(r4) 152 PPC_STL r24, VCPU_GPR(R24)(r4)
154 PPC_STL r25, VCPU_GPR(r25)(r4) 153 PPC_STL r25, VCPU_GPR(R25)(r4)
155 PPC_STL r26, VCPU_GPR(r26)(r4) 154 PPC_STL r26, VCPU_GPR(R26)(r4)
156 PPC_STL r27, VCPU_GPR(r27)(r4) 155 PPC_STL r27, VCPU_GPR(R27)(r4)
157 PPC_STL r28, VCPU_GPR(r28)(r4) 156 PPC_STL r28, VCPU_GPR(R28)(r4)
158 PPC_STL r29, VCPU_GPR(r29)(r4) 157 PPC_STL r29, VCPU_GPR(R29)(r4)
159 PPC_STL r30, VCPU_GPR(r30)(r4) 158 PPC_STL r30, VCPU_GPR(R30)(r4)
160 PPC_STL r31, VCPU_GPR(r31)(r4) 159 PPC_STL r31, VCPU_GPR(R31)(r4)
161 mtspr SPRN_EPLC, r8 160 mtspr SPRN_EPLC, r8
162 161
163 /* disable preemption, so we are sure we hit the fixup handler */ 162 /* disable preemption, so we are sure we hit the fixup handler */
164#ifdef CONFIG_PPC64 163 CURRENT_THREAD_INFO(r8, r1)
165 clrrdi r8,r1,THREAD_SHIFT
166#else
167 rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
168#endif
169 li r7, 1 164 li r7, 1
170 stw r7, TI_PREEMPT(r8) 165 stw r7, TI_PREEMPT(r8)
171 166
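
The hunk above folds the 32-bit/64-bit #ifdef into the common CURRENT_THREAD_INFO(r8, r1) macro. The removed instructions (clrrdi on 64-bit, rlwinm on 32-bit) and the macro compute the same thing: clear the low THREAD_SHIFT bits of the stack pointer to reach the thread_info at the base of the kernel stack. Below is a rough C equivalent of that address calculation; the THREAD_SHIFT value is illustrative and differs by configuration.

struct thread_info;                                /* opaque here */

#define THREAD_SHIFT   13                          /* illustrative only */
#define THREAD_SIZE    (1UL << THREAD_SHIFT)

static inline struct thread_info *thread_info_from_sp(unsigned long sp)
{
        /* clrrdi r8,r1,THREAD_SHIFT (ppc64) and
         * rlwinm r8,r1,0,0,31-THREAD_SHIFT (ppc32) both clear the low
         * THREAD_SHIFT bits of r1, i.e. round the stack pointer down to
         * the base of the kernel stack. */
        return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}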
@@ -211,24 +206,24 @@
211.macro kvm_handler intno srr0, srr1, flags 206.macro kvm_handler intno srr0, srr1, flags
212_GLOBAL(kvmppc_handler_\intno\()_\srr1) 207_GLOBAL(kvmppc_handler_\intno\()_\srr1)
213 GET_VCPU(r11, r10) 208 GET_VCPU(r11, r10)
214 PPC_STL r3, VCPU_GPR(r3)(r11) 209 PPC_STL r3, VCPU_GPR(R3)(r11)
215 mfspr r3, SPRN_SPRG_RSCRATCH0 210 mfspr r3, SPRN_SPRG_RSCRATCH0
216 PPC_STL r4, VCPU_GPR(r4)(r11) 211 PPC_STL r4, VCPU_GPR(R4)(r11)
217 PPC_LL r4, THREAD_NORMSAVE(0)(r10) 212 PPC_LL r4, THREAD_NORMSAVE(0)(r10)
218 PPC_STL r5, VCPU_GPR(r5)(r11) 213 PPC_STL r5, VCPU_GPR(R5)(r11)
219 stw r13, VCPU_CR(r11) 214 stw r13, VCPU_CR(r11)
220 mfspr r5, \srr0 215 mfspr r5, \srr0
221 PPC_STL r3, VCPU_GPR(r10)(r11) 216 PPC_STL r3, VCPU_GPR(R10)(r11)
222 PPC_LL r3, THREAD_NORMSAVE(2)(r10) 217 PPC_LL r3, THREAD_NORMSAVE(2)(r10)
223 PPC_STL r6, VCPU_GPR(r6)(r11) 218 PPC_STL r6, VCPU_GPR(R6)(r11)
224 PPC_STL r4, VCPU_GPR(r11)(r11) 219 PPC_STL r4, VCPU_GPR(R11)(r11)
225 mfspr r6, \srr1 220 mfspr r6, \srr1
226 PPC_STL r7, VCPU_GPR(r7)(r11) 221 PPC_STL r7, VCPU_GPR(R7)(r11)
227 PPC_STL r8, VCPU_GPR(r8)(r11) 222 PPC_STL r8, VCPU_GPR(R8)(r11)
228 PPC_STL r9, VCPU_GPR(r9)(r11) 223 PPC_STL r9, VCPU_GPR(R9)(r11)
229 PPC_STL r3, VCPU_GPR(r13)(r11) 224 PPC_STL r3, VCPU_GPR(R13)(r11)
230 mfctr r7 225 mfctr r7
231 PPC_STL r12, VCPU_GPR(r12)(r11) 226 PPC_STL r12, VCPU_GPR(R12)(r11)
232 PPC_STL r7, VCPU_CTR(r11) 227 PPC_STL r7, VCPU_CTR(r11)
233 mr r4, r11 228 mr r4, r11
234 kvm_handler_common \intno, \srr0, \flags 229 kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +233,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
238_GLOBAL(kvmppc_handler_\intno\()_\srr1) 233_GLOBAL(kvmppc_handler_\intno\()_\srr1)
239 mfspr r10, SPRN_SPRG_THREAD 234 mfspr r10, SPRN_SPRG_THREAD
240 GET_VCPU(r11, r10) 235 GET_VCPU(r11, r10)
241 PPC_STL r3, VCPU_GPR(r3)(r11) 236 PPC_STL r3, VCPU_GPR(R3)(r11)
242 mfspr r3, \scratch 237 mfspr r3, \scratch
243 PPC_STL r4, VCPU_GPR(r4)(r11) 238 PPC_STL r4, VCPU_GPR(R4)(r11)
244 PPC_LL r4, GPR9(r8) 239 PPC_LL r4, GPR9(r8)
245 PPC_STL r5, VCPU_GPR(r5)(r11) 240 PPC_STL r5, VCPU_GPR(R5)(r11)
246 stw r9, VCPU_CR(r11) 241 stw r9, VCPU_CR(r11)
247 mfspr r5, \srr0 242 mfspr r5, \srr0
248 PPC_STL r3, VCPU_GPR(r8)(r11) 243 PPC_STL r3, VCPU_GPR(R8)(r11)
249 PPC_LL r3, GPR10(r8) 244 PPC_LL r3, GPR10(r8)
250 PPC_STL r6, VCPU_GPR(r6)(r11) 245 PPC_STL r6, VCPU_GPR(R6)(r11)
251 PPC_STL r4, VCPU_GPR(r9)(r11) 246 PPC_STL r4, VCPU_GPR(R9)(r11)
252 mfspr r6, \srr1 247 mfspr r6, \srr1
253 PPC_LL r4, GPR11(r8) 248 PPC_LL r4, GPR11(r8)
254 PPC_STL r7, VCPU_GPR(r7)(r11) 249 PPC_STL r7, VCPU_GPR(R7)(r11)
255 PPC_STL r3, VCPU_GPR(r10)(r11) 250 PPC_STL r3, VCPU_GPR(R10)(r11)
256 mfctr r7 251 mfctr r7
257 PPC_STL r12, VCPU_GPR(r12)(r11) 252 PPC_STL r12, VCPU_GPR(R12)(r11)
258 PPC_STL r13, VCPU_GPR(r13)(r11) 253 PPC_STL r13, VCPU_GPR(R13)(r11)
259 PPC_STL r4, VCPU_GPR(r11)(r11) 254 PPC_STL r4, VCPU_GPR(R11)(r11)
260 PPC_STL r7, VCPU_CTR(r11) 255 PPC_STL r7, VCPU_CTR(r11)
261 mr r4, r11 256 mr r4, r11
262 kvm_handler_common \intno, \srr0, \flags 257 kvm_handler_common \intno, \srr0, \flags
@@ -267,7 +262,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
267kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \ 262kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
268 SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0 263 SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
269kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \ 264kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
270 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR) 265 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
271kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR 266kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
272kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 267kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
273kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ 268kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
@@ -310,7 +305,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
310_GLOBAL(kvmppc_resume_host) 305_GLOBAL(kvmppc_resume_host)
311 /* Save remaining volatile guest register state to vcpu. */ 306 /* Save remaining volatile guest register state to vcpu. */
312 mfspr r3, SPRN_VRSAVE 307 mfspr r3, SPRN_VRSAVE
313 PPC_STL r0, VCPU_GPR(r0)(r4) 308 PPC_STL r0, VCPU_GPR(R0)(r4)
314 mflr r5 309 mflr r5
315 mfspr r6, SPRN_SPRG4 310 mfspr r6, SPRN_SPRG4
316 PPC_STL r5, VCPU_LR(r4) 311 PPC_STL r5, VCPU_LR(r4)
@@ -358,27 +353,27 @@ _GLOBAL(kvmppc_resume_host)
358 353
359 /* Restore vcpu pointer and the nonvolatiles we used. */ 354 /* Restore vcpu pointer and the nonvolatiles we used. */
360 mr r4, r14 355 mr r4, r14
361 PPC_LL r14, VCPU_GPR(r14)(r4) 356 PPC_LL r14, VCPU_GPR(R14)(r4)
362 357
363 andi. r5, r3, RESUME_FLAG_NV 358 andi. r5, r3, RESUME_FLAG_NV
364 beq skip_nv_load 359 beq skip_nv_load
365 PPC_LL r15, VCPU_GPR(r15)(r4) 360 PPC_LL r15, VCPU_GPR(R15)(r4)
366 PPC_LL r16, VCPU_GPR(r16)(r4) 361 PPC_LL r16, VCPU_GPR(R16)(r4)
367 PPC_LL r17, VCPU_GPR(r17)(r4) 362 PPC_LL r17, VCPU_GPR(R17)(r4)
368 PPC_LL r18, VCPU_GPR(r18)(r4) 363 PPC_LL r18, VCPU_GPR(R18)(r4)
369 PPC_LL r19, VCPU_GPR(r19)(r4) 364 PPC_LL r19, VCPU_GPR(R19)(r4)
370 PPC_LL r20, VCPU_GPR(r20)(r4) 365 PPC_LL r20, VCPU_GPR(R20)(r4)
371 PPC_LL r21, VCPU_GPR(r21)(r4) 366 PPC_LL r21, VCPU_GPR(R21)(r4)
372 PPC_LL r22, VCPU_GPR(r22)(r4) 367 PPC_LL r22, VCPU_GPR(R22)(r4)
373 PPC_LL r23, VCPU_GPR(r23)(r4) 368 PPC_LL r23, VCPU_GPR(R23)(r4)
374 PPC_LL r24, VCPU_GPR(r24)(r4) 369 PPC_LL r24, VCPU_GPR(R24)(r4)
375 PPC_LL r25, VCPU_GPR(r25)(r4) 370 PPC_LL r25, VCPU_GPR(R25)(r4)
376 PPC_LL r26, VCPU_GPR(r26)(r4) 371 PPC_LL r26, VCPU_GPR(R26)(r4)
377 PPC_LL r27, VCPU_GPR(r27)(r4) 372 PPC_LL r27, VCPU_GPR(R27)(r4)
378 PPC_LL r28, VCPU_GPR(r28)(r4) 373 PPC_LL r28, VCPU_GPR(R28)(r4)
379 PPC_LL r29, VCPU_GPR(r29)(r4) 374 PPC_LL r29, VCPU_GPR(R29)(r4)
380 PPC_LL r30, VCPU_GPR(r30)(r4) 375 PPC_LL r30, VCPU_GPR(R30)(r4)
381 PPC_LL r31, VCPU_GPR(r31)(r4) 376 PPC_LL r31, VCPU_GPR(R31)(r4)
382skip_nv_load: 377skip_nv_load:
383 /* Should we return to the guest? */ 378 /* Should we return to the guest? */
384 andi. r5, r3, RESUME_FLAG_HOST 379 andi. r5, r3, RESUME_FLAG_HOST
@@ -396,23 +391,23 @@ heavyweight_exit:
396 * non-volatiles. 391 * non-volatiles.
397 */ 392 */
398 393
399 PPC_STL r15, VCPU_GPR(r15)(r4) 394 PPC_STL r15, VCPU_GPR(R15)(r4)
400 PPC_STL r16, VCPU_GPR(r16)(r4) 395 PPC_STL r16, VCPU_GPR(R16)(r4)
401 PPC_STL r17, VCPU_GPR(r17)(r4) 396 PPC_STL r17, VCPU_GPR(R17)(r4)
402 PPC_STL r18, VCPU_GPR(r18)(r4) 397 PPC_STL r18, VCPU_GPR(R18)(r4)
403 PPC_STL r19, VCPU_GPR(r19)(r4) 398 PPC_STL r19, VCPU_GPR(R19)(r4)
404 PPC_STL r20, VCPU_GPR(r20)(r4) 399 PPC_STL r20, VCPU_GPR(R20)(r4)
405 PPC_STL r21, VCPU_GPR(r21)(r4) 400 PPC_STL r21, VCPU_GPR(R21)(r4)
406 PPC_STL r22, VCPU_GPR(r22)(r4) 401 PPC_STL r22, VCPU_GPR(R22)(r4)
407 PPC_STL r23, VCPU_GPR(r23)(r4) 402 PPC_STL r23, VCPU_GPR(R23)(r4)
408 PPC_STL r24, VCPU_GPR(r24)(r4) 403 PPC_STL r24, VCPU_GPR(R24)(r4)
409 PPC_STL r25, VCPU_GPR(r25)(r4) 404 PPC_STL r25, VCPU_GPR(R25)(r4)
410 PPC_STL r26, VCPU_GPR(r26)(r4) 405 PPC_STL r26, VCPU_GPR(R26)(r4)
411 PPC_STL r27, VCPU_GPR(r27)(r4) 406 PPC_STL r27, VCPU_GPR(R27)(r4)
412 PPC_STL r28, VCPU_GPR(r28)(r4) 407 PPC_STL r28, VCPU_GPR(R28)(r4)
413 PPC_STL r29, VCPU_GPR(r29)(r4) 408 PPC_STL r29, VCPU_GPR(R29)(r4)
414 PPC_STL r30, VCPU_GPR(r30)(r4) 409 PPC_STL r30, VCPU_GPR(R30)(r4)
415 PPC_STL r31, VCPU_GPR(r31)(r4) 410 PPC_STL r31, VCPU_GPR(R31)(r4)
416 411
417 /* Load host non-volatile register state from host stack. */ 412 /* Load host non-volatile register state from host stack. */
418 PPC_LL r14, HOST_NV_GPR(r14)(r1) 413 PPC_LL r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +473,24 @@ _GLOBAL(__kvmppc_vcpu_run)
478 PPC_STL r31, HOST_NV_GPR(r31)(r1) 473 PPC_STL r31, HOST_NV_GPR(r31)(r1)
479 474
480 /* Load guest non-volatiles. */ 475 /* Load guest non-volatiles. */
481 PPC_LL r14, VCPU_GPR(r14)(r4) 476 PPC_LL r14, VCPU_GPR(R14)(r4)
482 PPC_LL r15, VCPU_GPR(r15)(r4) 477 PPC_LL r15, VCPU_GPR(R15)(r4)
483 PPC_LL r16, VCPU_GPR(r16)(r4) 478 PPC_LL r16, VCPU_GPR(R16)(r4)
484 PPC_LL r17, VCPU_GPR(r17)(r4) 479 PPC_LL r17, VCPU_GPR(R17)(r4)
485 PPC_LL r18, VCPU_GPR(r18)(r4) 480 PPC_LL r18, VCPU_GPR(R18)(r4)
486 PPC_LL r19, VCPU_GPR(r19)(r4) 481 PPC_LL r19, VCPU_GPR(R19)(r4)
487 PPC_LL r20, VCPU_GPR(r20)(r4) 482 PPC_LL r20, VCPU_GPR(R20)(r4)
488 PPC_LL r21, VCPU_GPR(r21)(r4) 483 PPC_LL r21, VCPU_GPR(R21)(r4)
489 PPC_LL r22, VCPU_GPR(r22)(r4) 484 PPC_LL r22, VCPU_GPR(R22)(r4)
490 PPC_LL r23, VCPU_GPR(r23)(r4) 485 PPC_LL r23, VCPU_GPR(R23)(r4)
491 PPC_LL r24, VCPU_GPR(r24)(r4) 486 PPC_LL r24, VCPU_GPR(R24)(r4)
492 PPC_LL r25, VCPU_GPR(r25)(r4) 487 PPC_LL r25, VCPU_GPR(R25)(r4)
493 PPC_LL r26, VCPU_GPR(r26)(r4) 488 PPC_LL r26, VCPU_GPR(R26)(r4)
494 PPC_LL r27, VCPU_GPR(r27)(r4) 489 PPC_LL r27, VCPU_GPR(R27)(r4)
495 PPC_LL r28, VCPU_GPR(r28)(r4) 490 PPC_LL r28, VCPU_GPR(R28)(r4)
496 PPC_LL r29, VCPU_GPR(r29)(r4) 491 PPC_LL r29, VCPU_GPR(R29)(r4)
497 PPC_LL r30, VCPU_GPR(r30)(r4) 492 PPC_LL r30, VCPU_GPR(R30)(r4)
498 PPC_LL r31, VCPU_GPR(r31)(r4) 493 PPC_LL r31, VCPU_GPR(R31)(r4)
499 494
500 495
501lightweight_exit: 496lightweight_exit:
@@ -554,13 +549,13 @@ lightweight_exit:
554 lwz r7, VCPU_CR(r4) 549 lwz r7, VCPU_CR(r4)
555 PPC_LL r8, VCPU_PC(r4) 550 PPC_LL r8, VCPU_PC(r4)
556 PPC_LD(r9, VCPU_SHARED_MSR, r11) 551 PPC_LD(r9, VCPU_SHARED_MSR, r11)
557 PPC_LL r0, VCPU_GPR(r0)(r4) 552 PPC_LL r0, VCPU_GPR(R0)(r4)
558 PPC_LL r1, VCPU_GPR(r1)(r4) 553 PPC_LL r1, VCPU_GPR(R1)(r4)
559 PPC_LL r2, VCPU_GPR(r2)(r4) 554 PPC_LL r2, VCPU_GPR(R2)(r4)
560 PPC_LL r10, VCPU_GPR(r10)(r4) 555 PPC_LL r10, VCPU_GPR(R10)(r4)
561 PPC_LL r11, VCPU_GPR(r11)(r4) 556 PPC_LL r11, VCPU_GPR(R11)(r4)
562 PPC_LL r12, VCPU_GPR(r12)(r4) 557 PPC_LL r12, VCPU_GPR(R12)(r4)
563 PPC_LL r13, VCPU_GPR(r13)(r4) 558 PPC_LL r13, VCPU_GPR(R13)(r4)
564 mtlr r3 559 mtlr r3
565 mtxer r5 560 mtxer r5
566 mtctr r6 561 mtctr r6
@@ -586,12 +581,12 @@ lightweight_exit:
586 mtcr r7 581 mtcr r7
587 582
588 /* Finish loading guest volatiles and jump to guest. */ 583 /* Finish loading guest volatiles and jump to guest. */
589 PPC_LL r5, VCPU_GPR(r5)(r4) 584 PPC_LL r5, VCPU_GPR(R5)(r4)
590 PPC_LL r6, VCPU_GPR(r6)(r4) 585 PPC_LL r6, VCPU_GPR(R6)(r4)
591 PPC_LL r7, VCPU_GPR(r7)(r4) 586 PPC_LL r7, VCPU_GPR(R7)(r4)
592 PPC_LL r8, VCPU_GPR(r8)(r4) 587 PPC_LL r8, VCPU_GPR(R8)(r4)
593 PPC_LL r9, VCPU_GPR(r9)(r4) 588 PPC_LL r9, VCPU_GPR(R9)(r4)
594 589
595 PPC_LL r3, VCPU_GPR(r3)(r4) 590 PPC_LL r3, VCPU_GPR(R3)(r4)
596 PPC_LL r4, VCPU_GPR(r4)(r4) 591 PPC_LL r4, VCPU_GPR(R4)(r4)
597 rfi 592 rfi
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 8b99e076dc81..e04b0ef55ce0 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -269,6 +269,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
269 *spr_val = vcpu->arch.shared->mas7_3 >> 32; 269 *spr_val = vcpu->arch.shared->mas7_3 >> 32;
270 break; 270 break;
271#endif 271#endif
272 case SPRN_DECAR:
273 *spr_val = vcpu->arch.decar;
274 break;
272 case SPRN_TLB0CFG: 275 case SPRN_TLB0CFG:
273 *spr_val = vcpu->arch.tlbcfg[0]; 276 *spr_val = vcpu->arch.tlbcfg[0];
274 break; 277 break;
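
The e500_emulate.c hunk adds a read path for SPRN_DECAR, the Book E decrementer auto-reload register, to the mfspr emulation: the guest's mfspr traps and the hypervisor hands back the software copy kept in vcpu->arch.decar. The toy program below models only that dispatch shape; the structure, return values, and field names are simplified stand-ins for the kernel's, not copies of them.

#include <stdio.h>

enum { SPRN_DECAR = 54 };                  /* Book E DECAR SPR number, for illustration */

struct toy_vcpu { unsigned long decar; };  /* software copy of the guest-visible SPR */

static int toy_emulate_mfspr(struct toy_vcpu *vcpu, int sprn, unsigned long *val)
{
        switch (sprn) {
        case SPRN_DECAR:
                *val = vcpu->decar;        /* guest reads back its auto-reload value */
                return 0;
        default:
                return -1;                 /* not handled in this toy model */
        }
}

int main(void)
{
        struct toy_vcpu vcpu = { .decar = 0x12345 };
        unsigned long v;

        if (toy_emulate_mfspr(&vcpu, SPRN_DECAR, &v) == 0)
                printf("emulated mfspr DECAR -> 0x%lx\n", v);
        return 0;
}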
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index fe6c1de6b701..1f89d26e65fb 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
3 * 3 *
4 * Author: Varun Sethi, <varun.sethi@freescale.com> 4 * Author: Varun Sethi, <varun.sethi@freescale.com>
5 * 5 *
@@ -57,7 +57,8 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
57 struct kvm_book3e_206_tlb_entry *gtlbe) 57 struct kvm_book3e_206_tlb_entry *gtlbe)
58{ 58{
59 unsigned int tid, ts; 59 unsigned int tid, ts;
60 u32 val, eaddr, lpid; 60 gva_t eaddr;
61 u32 val, lpid;
61 unsigned long flags; 62 unsigned long flags;
62 63
63 ts = get_tlb_ts(gtlbe); 64 ts = get_tlb_ts(gtlbe);
@@ -183,6 +184,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
183 184
184 vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \ 185 vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
185 SPRN_EPCR_DUVD; 186 SPRN_EPCR_DUVD;
187#ifdef CONFIG_64BIT
188 vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
189#endif
186 vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP; 190 vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
187 vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT); 191 vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
188 vcpu->arch.epsc = vcpu->arch.eplc; 192 vcpu->arch.epsc = vcpu->arch.eplc;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index f90e86dea7a2..ee04abaefe23 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -59,11 +59,13 @@
59#define OP_31_XOP_STHBRX 918 59#define OP_31_XOP_STHBRX 918
60 60
61#define OP_LWZ 32 61#define OP_LWZ 32
62#define OP_LD 58
62#define OP_LWZU 33 63#define OP_LWZU 33
63#define OP_LBZ 34 64#define OP_LBZ 34
64#define OP_LBZU 35 65#define OP_LBZU 35
65#define OP_STW 36 66#define OP_STW 36
66#define OP_STWU 37 67#define OP_STWU 37
68#define OP_STD 62
67#define OP_STB 38 69#define OP_STB 38
68#define OP_STBU 39 70#define OP_STBU 39
69#define OP_LHZ 40 71#define OP_LHZ 40
@@ -392,6 +394,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
392 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 394 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
393 break; 395 break;
394 396
397 /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
398 case OP_LD:
399 rt = get_rt(inst);
400 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
401 break;
402
395 case OP_LWZU: 403 case OP_LWZU:
396 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 404 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
397 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 405 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
@@ -412,6 +420,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
412 4, 1); 420 4, 1);
413 break; 421 break;
414 422
423 /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
424 case OP_STD:
425 rs = get_rs(inst);
426 emulated = kvmppc_handle_store(run, vcpu,
427 kvmppc_get_gpr(vcpu, rs),
428 8, 1);
429 break;
430
415 case OP_STWU: 431 case OP_STWU:
416 emulated = kvmppc_handle_store(run, vcpu, 432 emulated = kvmppc_handle_store(run, vcpu,
417 kvmppc_get_gpr(vcpu, rs), 433 kvmppc_get_gpr(vcpu, rs),
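
The emulate.c hunks add the plain 64-bit forms ld (primary opcode 58) and std (62) to the MMIO emulation, passing an access size of 8 to kvmppc_handle_load()/kvmppc_handle_store(); as the TBD comments note, the update/indexed variants are still unhandled. The kernel extracts the fields with its own get_rt()/get_rs() helpers; the self-contained sketch below only illustrates which instruction bits those fields come from (primary opcode in bits 0-5, RT/RS in bits 6-10 of the 32-bit word).

#include <stdint.h>
#include <stdio.h>

static unsigned int get_op(uint32_t inst) { return inst >> 26; }          /* primary opcode */
static unsigned int get_rt(uint32_t inst) { return (inst >> 21) & 0x1f; } /* RT/RS field */

int main(void)
{
        /* ld r5, 16(r7): DS-form, opcode 58, RT=5, RA=7, DS=16>>2, XO=0 */
        uint32_t inst = (58u << 26) | (5u << 21) | (7u << 16) | 16u;

        printf("op=%u rt=%u\n", get_op(inst), get_rt(inst));  /* prints op=58 rt=5 */
        return 0;
}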
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1493c8de947b..87f4dc886076 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -246,6 +246,7 @@ int kvm_dev_ioctl_check_extension(long ext)
246#endif 246#endif
247#ifdef CONFIG_PPC_BOOK3S_64 247#ifdef CONFIG_PPC_BOOK3S_64
248 case KVM_CAP_SPAPR_TCE: 248 case KVM_CAP_SPAPR_TCE:
249 case KVM_CAP_PPC_ALLOC_HTAB:
249 r = 1; 250 r = 1;
250 break; 251 break;
251#endif /* CONFIG_PPC_BOOK3S_64 */ 252#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -802,6 +803,23 @@ long kvm_arch_vm_ioctl(struct file *filp,
802 r = -EFAULT; 803 r = -EFAULT;
803 break; 804 break;
804 } 805 }
806
807 case KVM_PPC_ALLOCATE_HTAB: {
808 struct kvm *kvm = filp->private_data;
809 u32 htab_order;
810
811 r = -EFAULT;
812 if (get_user(htab_order, (u32 __user *)argp))
813 break;
814 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
815 if (r)
816 break;
817 r = -EFAULT;
818 if (put_user(htab_order, (u32 __user *)argp))
819 break;
820 r = 0;
821 break;
822 }
805#endif /* CONFIG_KVM_BOOK3S_64_HV */ 823#endif /* CONFIG_KVM_BOOK3S_64_HV */
806 824
807#ifdef CONFIG_PPC_BOOK3S_64 825#ifdef CONFIG_PPC_BOOK3S_64
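
The powerpc.c changes advertise KVM_CAP_PPC_ALLOC_HTAB and wire up the KVM_PPC_ALLOCATE_HTAB VM ioctl: userspace passes in a u32 hashed-page-table order, the kernel allocates (or resets) the HPT, possibly raising the order to its minimum, and writes the order actually used back through the same pointer. The sketch below shows one way userspace might call it; it assumes kernel headers new enough to define these constants, an open /dev/kvm fd in kvm_fd and a VM fd from KVM_CREATE_VM in vm_fd, and it keeps error handling minimal.

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int allocate_guest_hpt(int kvm_fd, int vm_fd, uint32_t order)
{
        /* Probe the capability on the /dev/kvm fd before using the ioctl. */
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_ALLOC_HTAB) <= 0)
                return -1;                      /* capability not present */

        /* order is in/out: the kernel may round it up and reports the
         * order it actually allocated. */
        if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order) < 0)
                return -1;                      /* allocation failed */

        printf("hashed page table allocated, order %u (%lu bytes)\n",
               order, 1UL << order);
        return 0;
}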