author	Gleb Natapov <gleb@redhat.com>	2013-11-04 03:20:57 -0500
committer	Gleb Natapov <gleb@redhat.com>	2013-11-04 03:20:57 -0500
commit	95f328d3ad1a8e4e3175a18546fb35c495e31130
tree	2f6496ef8354e9a0a315f23faef744f93fee8265
parent	daf727225b8abfdfe424716abac3d15a3ac5626a
parent	a78b55d1c0218b6d91d504941d20e36435c276f5
Merge branch 'kvm-ppc-queue' of git://github.com/agraf/linux-2.6 into queue
Conflicts:
	arch/powerpc/include/asm/processor.h
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kvm/arm.c | 5
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 5
-rw-r--r--  arch/mips/kvm/kvm_mips.c | 5
-rw-r--r--  arch/powerpc/include/asm/disassemble.h | 4
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 21
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 232
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_32.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 8
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 9
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 52
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 107
-rw-r--r--  arch/powerpc/include/asm/paca.h | 2
-rw-r--r--  arch/powerpc/include/asm/processor.h | 37
-rw-r--r--  arch/powerpc/include/asm/pte-book3e.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg.h | 15
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 8
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 86
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 23
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 30
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 45
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 154
-rw-r--r--  arch/powerpc/kernel/ptrace32.c | 2
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 6
-rw-r--r--  arch/powerpc/kernel/traps.c | 35
-rw-r--r--  arch/powerpc/kvm/44x.c | 58
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c | 8
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 2
-rw-r--r--  arch/powerpc/kvm/Kconfig | 28
-rw-r--r--  arch/powerpc/kvm/Makefile | 29
-rw-r--r--  arch/powerpc/kvm/book3s.c | 257
-rw-r--r--  arch/powerpc/kvm/book3s.h | 34
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c | 73
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 16
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c | 181
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 106
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 24
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c | 18
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c | 5
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 389
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 618
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 32
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c | 66
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 498
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 52
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 32
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 4
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 7
-rw-r--r--  arch/powerpc/kvm/booke.c | 337
-rw-r--r--  arch/powerpc/kvm/booke.h | 29
-rw-r--r--  arch/powerpc/kvm/e500.c | 59
-rw-r--r--  arch/powerpc/kvm/e500.h | 2
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c | 34
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c | 4
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 6
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 58
-rw-r--r--  arch/powerpc/kvm/emulate.c | 12
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 171
-rw-r--r--  arch/powerpc/kvm/trace.h | 429
-rw-r--r--  arch/powerpc/kvm/trace_booke.h | 177
-rw-r--r--  arch/powerpc/kvm/trace_pr.h | 297
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 5
-rw-r--r--  arch/x86/kvm/x86.c | 5
69 files changed, 3347 insertions(+), 1729 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index cc5adb9349ef..e312e4a53f8d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -152,12 +152,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index bdfd8789b376..985bf80c622e 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1550,12 +1550,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index a7b044536de4..73b34827826c 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -198,12 +198,13 @@ kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	return -ENOIOCTLCMD;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1b3b2b..856f8deb557a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
 	return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index cca12f084842..894662a5d4d5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	cmpwi	r10,0;							\
 	bne	do_kvm_##n
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come into to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
 	BEGIN_FTR_SECTION_NESTED(947)					\
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,HSTATE_CFAR(r13);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r10,area+EX_R10(r13);					\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\
@@ -217,6 +232,10 @@ do_kvm_##n: \
 	ld	r10,area+EX_R10(r13);					\
 	beq	89f;							\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r9,area+EX_PPR(r13);					\
+	std	r9,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r9,area+EX_R9(r13);					\
 	std	r12,HSTATE_SCRATCH0(r13);				\
 	li	r12,n;							\
@@ -236,7 +255,7 @@ do_kvm_##n: \
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #define KVMTEST_PR(n)			__KVMTEST(n)
 #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
 #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 851bac7afa4b..1bd92fd43cfb 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -123,6 +123,8 @@
 #define BOOK3S_HFLAG_SLB			0x2
 #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
 #define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
 
 #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
@@ -136,6 +138,8 @@
 #define KVM_GUEST_MODE_NONE	0
 #define KVM_GUEST_MODE_GUEST	1
 #define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
 
 #define KVM_INST_FETCH_FAILED	-1
 
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f1a874..4a594b76674d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -58,16 +58,18 @@ struct hpte_cache {
 	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
 	struct rcu_head rcu_head;
 	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
-	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
 		u64 esid;
@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
 	int hpte_cache_count;
 	spinlock_t mmu_lock;
 };
@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
-	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+	return vcpu->arch.book3s;
 }
 
-extern void kvm_return_point(void);
-
 /* Also add subarch specific defines */
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +207,6 @@ extern void kvm_return_point(void);
 #include <asm/kvm_book3s_64.h>
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
-	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		svcpu->gpr[num] = val;
-		svcpu_put(svcpu);
-		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
-	} else
-		vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong r = svcpu->gpr[num];
-		svcpu_put(svcpu);
-		return r;
-	} else
-		return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->cr = val;
-	svcpu_put(svcpu);
-	to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->cr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->xer = val;
-	to_book3s(vcpu)->shadow_vcpu->xer = val;
-	svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->xer;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->ctr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->ctr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->lr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->lr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->pc = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->pc;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->fault_dar;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	ulong crit_raw = vcpu->arch.shared->critical;
-	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
-	bool crit;
-
-	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
-		crit_raw &= 0xffffffff;
-		crit_r1 &= 0xffffffff;
-	}
-
-	/* Critical section when crit == r1 */
-	crit = (crit_raw == crit_r1);
-	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
-	return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-}
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;
@@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-#endif
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index ce0ef6ce8f86..c720e0b3238d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -22,7 +22,7 @@
 
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
-	return to_book3s(vcpu)->shadow_vcpu;
+	return vcpu->arch.shadow_vcpu;
 }
 
 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 86d638a3b359..bf0fa8b0a883 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,7 +20,7 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #define SPAPR_TCE_SHIFT		12
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 extern unsigned long kvm_rma_pages;
 #endif
@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
 		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
 }
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 /*
  * Note modification of an HPTE; set the HPTE modified bit
  * if anyone is interested.
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9039d3c97eec..0bd9348a4db9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -83,7 +83,7 @@ struct kvmppc_host_state {
 	u8 restore_hid5;
 	u8 napping;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
@@ -101,6 +101,7 @@ struct kvmppc_host_state {
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
+	u64 ppr;
 #endif
 };
 
@@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
-
-	u32 fault_dsisr;
-	u32 last_inst;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
+
 	ulong shadow_srr1;
 	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 sr[16];			/* Guest SRs */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb34c986..dd8f61510dfd 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,7 +26,12 @@
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS                        64
 
-#define KVMPPC_INST_EHPRIV	0x7c00021c
+#define KVMPPC_INST_EHPRIV		0x7c00021c
+#define EHPRIV_OC_SHIFT			11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG			1
+#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
+					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0866230b7c2d..237d1d25b448 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -68,10 +68,12 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 #define HPTEG_HASH_BITS_PTE_LONG	12
 #define HPTEG_HASH_BITS_VPTE		13
 #define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
 #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
 #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
 
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL
@@ -84,6 +86,9 @@ struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
 
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
@@ -219,15 +224,15 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
 	unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
 struct kvm_arch {
 	unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	unsigned int host_lpid;
@@ -251,7 +256,10 @@ struct kvm_arch {
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
@@ -262,6 +270,7 @@ struct kvm_arch {
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*
@@ -289,6 +298,10 @@ struct kvmppc_vcore {
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -323,6 +336,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
 struct kvmppc_mmu {
@@ -335,7 +349,8 @@ struct kvmppc_mmu {
 	/* book3s */
 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -355,6 +370,7 @@ struct kvmppc_slb {
 	bool large	: 1;	/* PTEs are 16MB */
 	bool tb		: 1;	/* 1TB segment */
 	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
 };
 
 # ifdef CONFIG_PPC_FSL_BOOK3E
@@ -372,17 +388,6 @@ struct kvmppc_slb {
 #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-	u32 dbcr0;
-	u32 dbcr1;
-	u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-	u32 dbcr4;
-#endif
-	u64 iac[KVMPPC_BOOKE_MAX_IAC];
-	u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2
@@ -397,6 +402,10 @@ struct kvm_vcpu_arch {
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
 	ulong gpr[32];
@@ -458,6 +467,8 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	ulong dabr;
 	ulong cfar;
+	ulong ppr;
+	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -493,6 +504,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
@@ -526,7 +539,10 @@ struct kvm_vcpu_arch {
 	u32 eptcfg;
 	u32 epr;
 	u32 crit_save;
-	struct kvmppc_booke_debug_reg dbg_reg;
+	/* guest debug registers*/
+	struct debug_reg dbg_reg;
+	/* hardware visible debug registers when in guest state */
+	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -577,7 +593,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_icp *icp; /* XICS presentation controller */
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
 	unsigned long pgfault_addr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a26c20..c8317fbf92c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				       struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                                  unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong *val);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				struct kvm_allocate_rma *rma);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
 				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
 				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old);
+	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			   unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long hva);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
 	return r;
 }
 
-union kvmppc_one_reg {
-	u32	wval;
-	u64	dval;
-	vector128 vval;
-	u64	vsxval[2];
-	struct {
-		u64	addr;
-		u64	length;
-	}	vpaval;
-};
-
 #define one_reg_size(id) \
 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
 	__v;					\
 })
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
 struct openpic;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 
 static inline u32 kvmppc_get_xics_latch(void)
 {
-	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+	u32 xirr;
 
+	xirr = get_paca()->kvm_hstate.saved_xirr;
 	get_paca()->kvm_hstate.saved_xirr = 0;
-
 	return xirr;
 }
 
@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
 }
 
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
 
 #else
 static inline void __init kvm_cma_reserve(void)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5954cebbc55..b6ea9e068c13 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,7 @@ struct paca_struct {
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	/* We use this to store guest state in */
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index ce4de5aed7b5..75a9e5a34ef9 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -147,21 +147,7 @@ typedef struct {
 #define TS_FPR(i) fpr[i][TS_FPROFFSET]
 #define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
 
-struct thread_struct {
-	unsigned long	ksp;		/* Kernel stack pointer */
-#ifdef CONFIG_PPC64
-	unsigned long	ksp_vsid;
-#endif
-	struct pt_regs	*regs;		/* Pointer to saved register state */
-	mm_segment_t	fs;		/* for get_fs() validation */
-#ifdef CONFIG_BOOKE
-	/* BookE base exception scratch space; align on cacheline */
-	unsigned long	normsave[8] ____cacheline_aligned;
-#endif
-#ifdef CONFIG_PPC32
-	void		*pgdir;		/* root of page-table tree */
-	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
-#endif
+struct debug_reg {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	/*
 	 * The following help to manage the use of Debug Control Registers
@@ -198,6 +184,27 @@ struct thread_struct {
 	unsigned long	dvc2;
 #endif
 #endif
+};
+
+struct thread_struct {
+	unsigned long	ksp;		/* Kernel stack pointer */
+
+#ifdef CONFIG_PPC64
+	unsigned long	ksp_vsid;
+#endif
+	struct pt_regs	*regs;		/* Pointer to saved register state */
+	mm_segment_t	fs;		/* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+	/* BookE base exception scratch space; align on cacheline */
+	unsigned long	normsave[8] ____cacheline_aligned;
+#endif
+#ifdef CONFIG_PPC32
+	void		*pgdir;		/* root of page-table tree */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
+#endif
+	/* Debug Registers */
+	struct debug_reg debug;
+
 	/* FP and VSX 0-31 register set */
 	double	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
 	struct {
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 0156702ba24e..576ad88104cb 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -40,7 +40,7 @@
 #define _PAGE_U1	0x010000
 #define _PAGE_U0	0x020000
 #define _PAGE_ACCESSED	0x040000
-#define _PAGE_LENDIAN	0x080000
+#define _PAGE_ENDIAN	0x080000
 #define _PAGE_GUARDED	0x100000
 #define _PAGE_COHERENT	0x200000 /* M: enforce memory coherence */
 #define _PAGE_NO_CACHE	0x400000 /* I: cache inhibit */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 10d1ef016bf1..e294673e9d4b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -243,6 +243,7 @@
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */
 #define SPRN_TBWU	0x11D	/* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40	0x11E	/* Timebase upper 40 bits (hyper, R/W) */
 #define SPRN_SPURR	0x134	/* Scaled PURR */
 #define SPRN_HSPRG0	0x130	/* Hypervisor Scratch 0 */
 #define SPRN_HSPRG1	0x131	/* Hypervisor Scratch 1 */
@@ -283,6 +284,7 @@
 #define   LPCR_ISL	(1ul << (63-2))
 #define   LPCR_VC_SH	(63-2)
 #define   LPCR_DPFD_SH	(63-11)
+#define   LPCR_DPFD	(7ul << LPCR_DPFD_SH)
 #define   LPCR_VRMASD	(0x1ful << (63-16))
 #define   LPCR_VRMA_L	(1ul << (63-12))
 #define   LPCR_VRMA_LP0	(1ul << (63-15))
@@ -299,6 +301,7 @@
 #define     LPCR_PECE2	0x00001000	/* machine check etc can cause exit */
 #define   LPCR_MER	0x00000800	/* Mediated External Exception */
 #define   LPCR_MER_SH	11
+#define   LPCR_TC	0x00000200	/* Translation control */
 #define   LPCR_LPES    0x0000000c
 #define   LPCR_LPES0   0x00000008      /* LPAR Env selector 0 */
 #define   LPCR_LPES1   0x00000004      /* LPAR Env selector 1 */
@@ -311,6 +314,10 @@
 #define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */
 #define	SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
+#define SPRN_PCR	0x152	/* Processor compatibility register */
+#define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
+#define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
+#define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
 #define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
 #define SPRN_TLBINDEXR	0x154	/* P7 TLB control register */
 #define SPRN_TLBVPNR	0x155	/* P7 TLB control register */
@@ -420,6 +427,7 @@
 #define	  HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
 #define	  HID4_LPID5_SH	 (63 - 6)	/* partition ID bottom 4 bits */
 #define	  HID4_RMOR_SH	 (63 - 22)	/* real mode offset (16 bits) */
+#define   HID4_RMOR	 (0xFFFFul << HID4_RMOR_SH)
 #define   HID4_LPES1	 (1 << (63-57))	/* LPAR env. sel. bit 1 */
 #define   HID4_RMLS0_SH	 (63 - 58)	/* Real mode limit top bit */
 #define	  HID4_LPID1_SH	 0		/* partition ID top 2 bits */
@@ -1102,6 +1110,13 @@
 #define PVR_BE		0x0070
 #define PVR_PA6T	0x0090
 
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204	0x0f000001
+#define PVR_ARCH_205	0x0f000002
+#define PVR_ARCH_206	0x0f000003
+#define PVR_ARCH_206p	0x0f100003
+#define PVR_ARCH_207	0x0f000004
+
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()		({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index ed8f836da094..2e31aacd8acc 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -381,7 +381,7 @@
 #define DBCR0_IA34T	0x00004000	/* Instr Addr 3-4 range Toggle */
 #define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */
 
-#define dbcr_iac_range(task)	((task)->thread.dbcr0)
+#define dbcr_iac_range(task)	((task)->thread.debug.dbcr0)
 #define DBCR_IAC12I	DBCR0_IA12			/* Range Inclusive */
 #define DBCR_IAC12X	(DBCR0_IA12 | DBCR0_IA12X)	/* Range Exclusive */
 #define DBCR_IAC12MODE	(DBCR0_IA12 | DBCR0_IA12X)	/* IAC 1-2 Mode Bits */
@@ -395,7 +395,7 @@
 #define DBCR1_DAC1W	0x20000000	/* DAC1 Write Debug Event */
 #define DBCR1_DAC2W	0x10000000	/* DAC2 Write Debug Event */
 
-#define dbcr_dac(task)	((task)->thread.dbcr1)
+#define dbcr_dac(task)	((task)->thread.debug.dbcr1)
 #define DBCR_DAC1R	DBCR1_DAC1R
 #define DBCR_DAC1W	DBCR1_DAC1W
 #define DBCR_DAC2R	DBCR1_DAC2R
@@ -441,7 +441,7 @@
 #define DBCR0_CRET	0x00000020	/* Critical Return Debug Event */
 #define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */
 
-#define dbcr_dac(task)	((task)->thread.dbcr0)
+#define dbcr_dac(task)	((task)->thread.debug.dbcr0)
 #define DBCR_DAC1R	DBCR0_DAC1R
 #define DBCR_DAC1W	DBCR0_DAC1W
 #define DBCR_DAC2R	DBCR0_DAC2R
@@ -475,7 +475,7 @@
 #define DBCR1_IAC34MX	0x000000C0	/* Instr Addr 3-4 range eXclusive */
 #define DBCR1_IAC34AT	0x00000001	/* Instr Addr 3-4 range Toggle */
 
-#define dbcr_iac_range(task)	((task)->thread.dbcr1)
+#define dbcr_iac_range(task)	((task)->thread.debug.dbcr1)
 #define DBCR_IAC12I	DBCR1_IAC12M	/* Range Inclusive */
 #define DBCR_IAC12X	DBCR1_IAC12MX	/* Range Exclusive */
 #define DBCR_IAC12MODE	DBCR1_IAC12MX	/* IAC 1-2 Mode Bits */
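
Every accessor in this file now reaches through thread.debug rather than thread itself, so the BookE debug SPR images have evidently been gathered into one sub-structure. A hedged sketch of the implied container, with the field set inferred from the accesses in this series (the authoritative layout lives in asm/processor.h, which is not part of these hunks):

/* Hypothetical sketch of the consolidated BookE debug state implied by
 * the thread.debug.* accesses in this diff; field names are taken from
 * the accesses, the grouping and ifdefs are assumptions. */
struct debug_reg {
	unsigned long dbcr0, dbcr1;
#ifdef CONFIG_BOOKE
	unsigned long dbcr2;
#endif
	unsigned long iac1, iac2, iac3, iac4;	/* instruction address compares */
	unsigned long dac1, dac2;		/* data address compares */
	unsigned long dvc1, dvc2;		/* data value compares */
	unsigned long dbsr;			/* debug status */
};

Collecting these into one struct is what lets KVM copy or swap the whole guest debug context in a single assignment instead of field by field.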
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 2be5618cdec6..9ee12610af02 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,6 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
+extern void switch_booke_debug_regs(struct thread_struct *new_thread);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e9ff90..6836ec79a830 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_PPC_SMT
 #define __KVM_HAVE_IRQCHIP
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
 
 struct kvm_regs {
 	__u64 pc;
@@ -269,7 +270,24 @@ struct kvm_fpu {
 	__u64 fpr[32];
 };
 
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE		0x0
+#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 struct kvm_debug_exit_arch {
+	__u64 address;
+	/*
+	 * exiting to userspace because of h/w breakpoint, watchpoint
+	 * (read, write or both) and software breakpoint.
+	 */
+	__u32 status;
+	__u32 reserved;
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +299,6 @@ struct kvm_guest_debug_arch {
 		 * Type denotes h/w breakpoint, read watchpoint, write
 		 * watchpoint or watchpoint (both read and write).
 		 */
-#define KVMPPC_DEBUG_NONE		0x0
-#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 		__u32 type;
 		__u32 reserved;
 	} bp[16];
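
Hoisting the KVMPPC_DEBUG_* flags out of kvm_guest_debug_arch lets the same values serve both as the per-slot type in KVM_SET_GUEST_DEBUG and as the status reported in kvm_debug_exit_arch when the vcpu exits with KVM_EXIT_DEBUG. A hedged userspace sketch of arming one hardware instruction breakpoint (vcpu_fd and bp_addr are assumptions for illustration):

/* Hypothetical sketch: arm one hardware breakpoint on a vcpu using the
 * flags defined in this diff; KVM_GUESTDBG_* are the generic KVM
 * guest-debug control bits from linux/kvm.h. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hw_breakpoint(int vcpu_fd, __u64 bp_addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.bp[0].addr = bp_addr;
	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

On a hit, userspace would read run->debug.arch.address and run->debug.arch.status to see which of the sixteen slots (or a software breakpoint) fired.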
@@ -429,6 +443,11 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
@@ -499,6 +518,65 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TLB3PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
 #define KVM_REG_PPC_EPTCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
 
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_PPR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM		(KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n)	(KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n)	(KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
 
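
The KVM_REG_PPC_TM block carves out a one-reg window for checkpointed transactional state, with the GPR/VSR helper macros providing indexed access. A sketch of reading one checkpointed GPR through the standard one-reg ioctl (vcpu_fd is an assumption; the macros are the ones added above):

/* Hypothetical sketch: fetch checkpointed GPR 3 via the TM one-reg
 * window; KVM_REG_PPC_TM_GPR(3) expands to KVM_REG_PPC_TM_GPR0 + 3. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_tm_gpr3(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TM_GPR(3),
		.addr = (uintptr_t)val,
	};
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}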
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 502c7a4e73f7..479b036d36d7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -114,7 +114,7 @@ int main(void)
 #endif /* CONFIG_SPE */
 #endif /* CONFIG_PPC64 */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
+	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0));
 #endif
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
@@ -446,7 +446,7 @@ int main(void)
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
 	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
 	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
@@ -477,7 +477,7 @@ int main(void)
 	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
 
 	/* book3s */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -509,6 +509,8 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -518,18 +520,22 @@ int main(void)
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
+	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
-	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
-			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
+	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
+	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
 
 #ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
 #else
 # define SVCPU_FIELD(x, f)
@@ -581,7 +587,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
 	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
@@ -597,10 +603,11 @@ int main(void)
 	HSTATE_FIELD(HSTATE_DABR, dabr);
 	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
 	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #ifdef CONFIG_PPC_BOOK3S_64
 	HSTATE_FIELD(HSTATE_CFAR, cfar);
+	HSTATE_FIELD(HSTATE_PPR, ppr);
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3a9ed6ac224b..9f905e40922e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -126,7 +126,7 @@ BEGIN_FTR_SECTION
 	bgt	cr1,.
 	GET_PACA(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	li	r0,KVM_HWTHREAD_IN_KERNEL
 	stb	r0,HSTATE_HWTHREAD_STATE(r13)
 	/* Order setting hwthread_state vs. testing hwthread_req */
@@ -425,7 +425,7 @@ data_access_check_stab:
 	mfspr	r9,SPRN_DSISR
 	srdi	r10,r10,60
 	rlwimi	r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	lbz	r9,HSTATE_IN_GUEST(r13)
 	rlwimi	r10,r9,8,0x300
 #endif
@@ -650,6 +650,32 @@ slb_miss_user_pseries:
 	b	.			/* prevent spec. execution */
 #endif /* __DISABLED__ */
 
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+kvmppc_skip_interrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_SRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_SRR0, r13
+	GET_SCRATCH0(r13)
+	rfid
+	b	.
+
+kvmppc_skip_Hinterrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_HSRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_HSRR0, r13
+	GET_SCRATCH0(r13)
+	hrfid
+	b	.
+#endif
+
 /*
  * Code from here down to __end_handlers is invoked from the
  * exception prologs above. Because the prologs assemble the
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index e11863f4e595..847e40e62fce 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -84,7 +84,7 @@ _GLOBAL(power7_nap)
 	std	r9,_MSR(r1)
 	std	r1,PACAR1(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	/* Tell KVM we're napping */
 	li	r4,KVM_HWTHREAD_IN_NAP
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 96d2fdf3aa9e..ec5ae55ba36a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -314,28 +314,28 @@ static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
  */
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
-	thread->iac1 = thread->iac2 = 0;
+	thread->debug.iac1 = thread->debug.iac2 = 0;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	thread->iac3 = thread->iac4 = 0;
+	thread->debug.iac3 = thread->debug.iac4 = 0;
 #endif
-	thread->dac1 = thread->dac2 = 0;
+	thread->debug.dac1 = thread->debug.dac2 = 0;
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	thread->dvc1 = thread->dvc2 = 0;
+	thread->debug.dvc1 = thread->debug.dvc2 = 0;
 #endif
-	thread->dbcr0 = 0;
+	thread->debug.dbcr0 = 0;
 #ifdef CONFIG_BOOKE
 	/*
 	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
 	 */
-	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \
+	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
 			DBCR1_IAC3US | DBCR1_IAC4US;
 	/*
 	 * Force Data Address Compare User/Supervisor bits to be User-only
 	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
 	 */
-	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
+	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
 #else
-	thread->dbcr1 = 0;
+	thread->debug.dbcr1 = 0;
 #endif
 }
 
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->iac1);
-	mtspr(SPRN_IAC2, thread->iac2);
+	mtspr(SPRN_IAC1, thread->debug.iac1);
+	mtspr(SPRN_IAC2, thread->debug.iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->iac3);
-	mtspr(SPRN_IAC4, thread->iac4);
+	mtspr(SPRN_IAC3, thread->debug.iac3);
+	mtspr(SPRN_IAC4, thread->debug.iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->dac1);
-	mtspr(SPRN_DAC2, thread->dac2);
+	mtspr(SPRN_DAC1, thread->debug.dac1);
+	mtspr(SPRN_DAC2, thread->debug.dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->dvc1);
-	mtspr(SPRN_DVC2, thread->dvc2);
+	mtspr(SPRN_DVC1, thread->debug.dvc1);
+	mtspr(SPRN_DVC2, thread->debug.dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->dbcr0);
-	mtspr(SPRN_DBCR1, thread->dbcr1);
+	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
+	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->dbcr2);
+	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
 #endif
 }
 /*
@@ -371,12 +371,13 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-static void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct thread_struct *new_thread)
 {
-	if ((current->thread.dbcr0 & DBCR0_IDM)
-		|| (new_thread->dbcr0 & DBCR0_IDM))
+	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
+		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
 			prime_debug_regs(new_thread);
 }
+EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
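
Un-static-ing and exporting switch_booke_debug_regs is what lets modular BookE KVM swap debug state on the vcpu run path: the helper only touches the SPRs when either the outgoing or incoming state has DBCR0_IDM set, so the common no-debug case stays cheap. A loosely sketched caller, under the assumption that KVM keeps a guest-side thread_struct image (the field name below is invented for illustration, not taken from this diff):

/* Hypothetical caller sketch: switch to guest-owned BookE debug state
 * before entering the guest. vcpu->arch.guest_thread is an assumed
 * field name; the real KVM layout is not part of this hunk. */
#include <asm/switch_to.h>

static void enter_guest_debug_context(struct kvm_vcpu *vcpu)
{
	switch_booke_debug_regs(&vcpu->arch.guest_thread);
}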
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 9a0d24c390a3..ddaf1780879c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -854,8 +854,8 @@ void user_enable_single_step(struct task_struct *task)
 
 	if (regs != NULL) {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		task->thread.dbcr0 &= ~DBCR0_BT;
-		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+		task->thread.debug.dbcr0 &= ~DBCR0_BT;
+		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
 		regs->msr |= MSR_DE;
 #else
 		regs->msr &= ~MSR_BE;
@@ -871,8 +871,8 @@ void user_enable_block_step(struct task_struct *task)
 
 	if (regs != NULL) {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		task->thread.dbcr0 &= ~DBCR0_IC;
-		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
+		task->thread.debug.dbcr0 &= ~DBCR0_IC;
+		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
 		regs->msr |= MSR_DE;
 #else
 		regs->msr &= ~MSR_SE;
@@ -894,16 +894,16 @@ void user_disable_single_step(struct task_struct *task)
 		 * And, after doing so, if all debug flags are off, turn
 		 * off DBCR0(IDM) and MSR(DE) .... Torez
 		 */
-		task->thread.dbcr0 &= ~DBCR0_IC;
+		task->thread.debug.dbcr0 &= ~DBCR0_IC;
 		/*
 		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
 		 */
-		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
-					task->thread.dbcr1)) {
+		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+					task->thread.debug.dbcr1)) {
 			/*
 			 * All debug events were off.....
 			 */
-			task->thread.dbcr0 &= ~DBCR0_IDM;
+			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
 			regs->msr &= ~MSR_DE;
 		}
 #else
@@ -1022,14 +1022,14 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 	 */
 
 	/* DAC's hold the whole address without any mode flags */
-	task->thread.dac1 = data & ~0x3UL;
+	task->thread.debug.dac1 = data & ~0x3UL;
 
-	if (task->thread.dac1 == 0) {
+	if (task->thread.debug.dac1 == 0) {
 		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
-		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
-					task->thread.dbcr1)) {
+		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+					task->thread.debug.dbcr1)) {
 			task->thread.regs->msr &= ~MSR_DE;
-			task->thread.dbcr0 &= ~DBCR0_IDM;
+			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
 		}
 		return 0;
 	}
@@ -1041,7 +1041,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 
 	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
 	   register */
-	task->thread.dbcr0 |= DBCR0_IDM;
+	task->thread.debug.dbcr0 |= DBCR0_IDM;
 
 	/* Check for write and read flags and set DBCR0
 	   accordingly */
@@ -1071,10 +1071,10 @@ static long set_instruction_bp(struct task_struct *child,
 			       struct ppc_hw_breakpoint *bp_info)
 {
 	int slot;
-	int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
-	int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
-	int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
-	int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
+	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
+	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
+	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
+	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
 
 	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
 		slot2_in_use = 1;
@@ -1093,9 +1093,9 @@ static long set_instruction_bp(struct task_struct *child,
 		/* We need a pair of IAC regsisters */
 		if ((!slot1_in_use) && (!slot2_in_use)) {
 			slot = 1;
-			child->thread.iac1 = bp_info->addr;
-			child->thread.iac2 = bp_info->addr2;
-			child->thread.dbcr0 |= DBCR0_IAC1;
+			child->thread.debug.iac1 = bp_info->addr;
+			child->thread.debug.iac2 = bp_info->addr2;
+			child->thread.debug.dbcr0 |= DBCR0_IAC1;
 			if (bp_info->addr_mode ==
 					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
 				dbcr_iac_range(child) |= DBCR_IAC12X;
@@ -1104,9 +1104,9 @@ static long set_instruction_bp(struct task_struct *child,
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 		} else if ((!slot3_in_use) && (!slot4_in_use)) {
 			slot = 3;
-			child->thread.iac3 = bp_info->addr;
-			child->thread.iac4 = bp_info->addr2;
-			child->thread.dbcr0 |= DBCR0_IAC3;
+			child->thread.debug.iac3 = bp_info->addr;
+			child->thread.debug.iac4 = bp_info->addr2;
+			child->thread.debug.dbcr0 |= DBCR0_IAC3;
 			if (bp_info->addr_mode ==
 					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
 				dbcr_iac_range(child) |= DBCR_IAC34X;
@@ -1126,30 +1126,30 @@ static long set_instruction_bp(struct task_struct *child,
 		 */
 		if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
 			slot = 1;
-			child->thread.iac1 = bp_info->addr;
-			child->thread.dbcr0 |= DBCR0_IAC1;
+			child->thread.debug.iac1 = bp_info->addr;
+			child->thread.debug.dbcr0 |= DBCR0_IAC1;
 			goto out;
 		}
 	}
 	if (!slot2_in_use) {
 		slot = 2;
-		child->thread.iac2 = bp_info->addr;
-		child->thread.dbcr0 |= DBCR0_IAC2;
+		child->thread.debug.iac2 = bp_info->addr;
+		child->thread.debug.dbcr0 |= DBCR0_IAC2;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 	} else if (!slot3_in_use) {
 		slot = 3;
-		child->thread.iac3 = bp_info->addr;
-		child->thread.dbcr0 |= DBCR0_IAC3;
+		child->thread.debug.iac3 = bp_info->addr;
+		child->thread.debug.dbcr0 |= DBCR0_IAC3;
 	} else if (!slot4_in_use) {
 		slot = 4;
-		child->thread.iac4 = bp_info->addr;
-		child->thread.dbcr0 |= DBCR0_IAC4;
+		child->thread.debug.iac4 = bp_info->addr;
+		child->thread.debug.dbcr0 |= DBCR0_IAC4;
 #endif
 	} else
 		return -ENOSPC;
 	}
 out:
-	child->thread.dbcr0 |= DBCR0_IDM;
+	child->thread.debug.dbcr0 |= DBCR0_IDM;
 	child->thread.regs->msr |= MSR_DE;
 
 	return slot;
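
This slot-allocation logic services the generic PPC_PTRACE_SETHWDEBUG request; relocating the fields under thread.debug changes nothing in the userspace ABI. For reference, a hedged sketch of the corresponding user-side call (child_pid and addr are assumptions for illustration):

/* Hypothetical sketch: request one exact-match instruction breakpoint;
 * the kernel picks a free IAC slot via the logic above and returns the
 * slot number, or a negative errno. */
#include <sys/ptrace.h>
#include <asm/ptrace.h>		/* struct ppc_hw_breakpoint */

static long set_exec_bp(pid_t child_pid, unsigned long addr)
{
	struct ppc_hw_breakpoint bp = {
		.version        = 1,
		.trigger_type   = PPC_BREAKPOINT_TRIGGER_EXECUTE,
		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
		.addr           = addr,
	};
	return ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &bp);
}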
@@ -1159,49 +1159,49 @@ static int del_instruction_bp(struct task_struct *child, int slot)
 {
 	switch (slot) {
 	case 1:
-		if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
 			/* address range - clear slots 1 & 2 */
-			child->thread.iac2 = 0;
+			child->thread.debug.iac2 = 0;
 			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
 		}
-		child->thread.iac1 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC1;
+		child->thread.debug.iac1 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
 		break;
 	case 2:
-		if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
 			/* used in a range */
 			return -EINVAL;
-		child->thread.iac2 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC2;
+		child->thread.debug.iac2 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
 		break;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 	case 3:
-		if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
 			/* address range - clear slots 3 & 4 */
-			child->thread.iac4 = 0;
+			child->thread.debug.iac4 = 0;
 			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
 		}
-		child->thread.iac3 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC3;
+		child->thread.debug.iac3 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
 		break;
 	case 4:
-		if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
 			/* Used in a range */
 			return -EINVAL;
-		child->thread.iac4 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC4;
+		child->thread.debug.iac4 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
 		break;
 #endif
 	default:
@@ -1231,18 +1231,18 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
 			dbcr_dac(child) |= DBCR_DAC1R;
 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
 			dbcr_dac(child) |= DBCR_DAC1W;
-		child->thread.dac1 = (unsigned long)bp_info->addr;
+		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
 		if (byte_enable) {
-			child->thread.dvc1 =
+			child->thread.debug.dvc1 =
 				(unsigned long)bp_info->condition_value;
-			child->thread.dbcr2 |=
+			child->thread.debug.dbcr2 |=
 				((byte_enable << DBCR2_DVC1BE_SHIFT) |
 				 (condition_mode << DBCR2_DVC1M_SHIFT));
 		}
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-	} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
+	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
 		/* Both dac1 and dac2 are part of a range */
 		return -ENOSPC;
 #endif
@@ -1252,19 +1252,19 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
 			dbcr_dac(child) |= DBCR_DAC2R;
 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
 			dbcr_dac(child) |= DBCR_DAC2W;
-		child->thread.dac2 = (unsigned long)bp_info->addr;
+		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
 		if (byte_enable) {
-			child->thread.dvc2 =
+			child->thread.debug.dvc2 =
 				(unsigned long)bp_info->condition_value;
-			child->thread.dbcr2 |=
+			child->thread.debug.dbcr2 |=
 				((byte_enable << DBCR2_DVC2BE_SHIFT) |
 				 (condition_mode << DBCR2_DVC2M_SHIFT));
 		}
 #endif
 	} else
 		return -ENOSPC;
-	child->thread.dbcr0 |= DBCR0_IDM;
+	child->thread.debug.dbcr0 |= DBCR0_IDM;
 	child->thread.regs->msr |= MSR_DE;
 
 	return slot + 4;
@@ -1276,32 +1276,32 @@ static int del_dac(struct task_struct *child, int slot)
 		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
 			return -ENOENT;
 
-		child->thread.dac1 = 0;
+		child->thread.debug.dac1 = 0;
 		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-		if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
-			child->thread.dac2 = 0;
-			child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
+			child->thread.debug.dac2 = 0;
+			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
 		}
-		child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
+		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
 #endif
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-		child->thread.dvc1 = 0;
+		child->thread.debug.dvc1 = 0;
 #endif
 	} else if (slot == 2) {
 		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
 			return -ENOENT;
 
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-		if (child->thread.dbcr2 & DBCR2_DAC12MODE)
+		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
 			/* Part of a range */
 			return -EINVAL;
-		child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
+		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
 #endif
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-		child->thread.dvc2 = 0;
+		child->thread.debug.dvc2 = 0;
 #endif
-		child->thread.dac2 = 0;
+		child->thread.debug.dac2 = 0;
 		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
 	} else
 		return -EINVAL;
@@ -1343,22 +1343,22 @@ static int set_dac_range(struct task_struct *child,
 			return -EIO;
 	}
 
-	if (child->thread.dbcr0 &
+	if (child->thread.debug.dbcr0 &
 	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
 		return -ENOSPC;
 
 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
-		child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
+		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
-		child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
-	child->thread.dac1 = bp_info->addr;
-	child->thread.dac2 = bp_info->addr2;
+		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
+	child->thread.debug.dac1 = bp_info->addr;
+	child->thread.debug.dac2 = bp_info->addr2;
 	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
-		child->thread.dbcr2 |= DBCR2_DAC12M;
+		child->thread.debug.dbcr2 |= DBCR2_DAC12M;
 	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
-		child->thread.dbcr2 |= DBCR2_DAC12MX;
+		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
 	else	/* PPC_BREAKPOINT_MODE_MASK */
-		child->thread.dbcr2 |= DBCR2_DAC12MM;
+		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
 	child->thread.regs->msr |= MSR_DE;
 
 	return 5;
@@ -1489,9 +1489,9 @@ static long ppc_del_hwdebug(struct task_struct *child, long data)
 		rc = del_dac(child, (int)data - 4);
 
 	if (!rc) {
-		if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
-					child->thread.dbcr1)) {
-			child->thread.dbcr0 &= ~DBCR0_IDM;
+		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
+					child->thread.debug.dbcr1)) {
+			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
 			child->thread.regs->msr &= ~MSR_DE;
 		}
 	}
@@ -1669,7 +1669,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		if (addr > 0)
 			break;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		ret = put_user(child->thread.dac1, datalp);
+		ret = put_user(child->thread.debug.dac1, datalp);
 #else
 		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
 			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f51599e941c7..18c7c65ea46d 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -269,7 +269,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (addr > 0)
 			break;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		ret = put_user(child->thread.dac1, (u32 __user *)data);
+		ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
 #else
 		dabr_fake = (
 			(child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index bebdf1a1a540..3f220d93c72f 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1309,7 +1309,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	unsigned char tmp;
 	unsigned long new_msr = regs->msr;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	unsigned long new_dbcr0 = current->thread.dbcr0;
+	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
 #endif
 
 	for (i=0; i<ndbg; i++) {
@@ -1324,7 +1324,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 		} else {
 			new_dbcr0 &= ~DBCR0_IC;
 			if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
-					current->thread.dbcr1)) {
+					current->thread.debug.dbcr1)) {
 				new_msr &= ~MSR_DE;
 				new_dbcr0 &= ~DBCR0_IDM;
 			}
@@ -1359,7 +1359,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	   the user is really doing something wrong. */
 	regs->msr = new_msr;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	current->thread.dbcr0 = new_dbcr0;
+	current->thread.debug.dbcr0 = new_dbcr0;
 #endif
 
 	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f783c932faeb..4f5df4e7df12 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -351,8 +351,8 @@ static inline int check_io_access(struct pt_regs *regs)
 #define REASON_TRAP		ESR_PTR
 
 /* single-step stuff */
-#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
-#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
+#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
+#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
 
 #else
 /* On non-4xx, the reason for the machine check or program
@@ -1486,7 +1486,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
 		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
 #endif
 		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
 			     5);
@@ -1497,24 +1497,24 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 			     6);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC1) {
-		current->thread.dbcr0 &= ~DBCR0_IAC1;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
 		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
 		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
 			     1);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC2) {
-		current->thread.dbcr0 &= ~DBCR0_IAC2;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
 		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
 			     2);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC3) {
-		current->thread.dbcr0 &= ~DBCR0_IAC3;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
 		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
 		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
 			     3);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC4) {
-		current->thread.dbcr0 &= ~DBCR0_IAC4;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
 		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
 			     4);
 		changed |= 0x01;
@@ -1524,19 +1524,20 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 	 * Check all other debug flags and see if that bit needs to be turned
 	 * back on or not.
 	 */
-	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
+	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
+			       current->thread.debug.dbcr1))
 		regs->msr |= MSR_DE;
 	else
 		/* Make sure the IDM flag is off */
-		current->thread.dbcr0 &= ~DBCR0_IDM;
+		current->thread.debug.dbcr0 &= ~DBCR0_IDM;
 
 	if (changed & 0x01)
-		mtspr(SPRN_DBCR0, current->thread.dbcr0);
+		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
 }
 
 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 {
-	current->thread.dbsr = debug_status;
+	current->thread.debug.dbsr = debug_status;
 
 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
 	 * on server, it stops on the target of the branch. In order to simulate
@@ -1553,8 +1554,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 
 	/* Do the single step trick only when coming from userspace */
 	if (user_mode(regs)) {
-		current->thread.dbcr0 &= ~DBCR0_BT;
-		current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+		current->thread.debug.dbcr0 &= ~DBCR0_BT;
+		current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
 		regs->msr |= MSR_DE;
 		return;
 	}
@@ -1582,13 +1583,13 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 		return;
 
 	if (user_mode(regs)) {
-		current->thread.dbcr0 &= ~DBCR0_IC;
-		if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
-					current->thread.dbcr1))
+		current->thread.debug.dbcr0 &= ~DBCR0_IC;
+		if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
+					current->thread.debug.dbcr1))
 			regs->msr |= MSR_DE;
 		else
 			/* Make sure the IDM bit is off */
-			current->thread.dbcr0 &= ~DBCR0_IDM;
+			current->thread.debug.dbcr0 &= ~DBCR0_IDM;
 	}
 
 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 2f5c6b6d6877..93221e87b911 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -31,13 +31,13 @@
31#include "44x_tlb.h" 31#include "44x_tlb.h"
32#include "booke.h" 32#include "booke.h"
33 33
34void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 34static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
35{ 35{
36 kvmppc_booke_vcpu_load(vcpu, cpu); 36 kvmppc_booke_vcpu_load(vcpu, cpu);
37 kvmppc_44x_tlb_load(vcpu); 37 kvmppc_44x_tlb_load(vcpu);
38} 38}
39 39
40void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 40static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
41{ 41{
42 kvmppc_44x_tlb_put(vcpu); 42 kvmppc_44x_tlb_put(vcpu);
43 kvmppc_booke_vcpu_put(vcpu); 43 kvmppc_booke_vcpu_put(vcpu);
@@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
+				     struct kvm_sregs *sregs)
 {
-	kvmppc_get_sregs_ivor(vcpu, sregs);
+	return kvmppc_get_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
+				     struct kvm_sregs *sregs)
 {
 	return kvmppc_set_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
 		       union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
 		       union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
+						    unsigned int id)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x;
 	struct kvm_vcpu *vcpu;
@@ -167,7 +170,7 @@ out:
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 
@@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_44x(struct kvm *kvm)
 {
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
 {
 }
 
+static struct kvmppc_ops kvm_ops_44x = {
+	.get_sregs = kvmppc_core_get_sregs_44x,
+	.set_sregs = kvmppc_core_set_sregs_44x,
+	.get_one_reg = kvmppc_get_one_reg_44x,
+	.set_one_reg = kvmppc_set_one_reg_44x,
+	.vcpu_load   = kvmppc_core_vcpu_load_44x,
+	.vcpu_put    = kvmppc_core_vcpu_put_44x,
+	.vcpu_create = kvmppc_core_vcpu_create_44x,
+	.vcpu_free   = kvmppc_core_vcpu_free_44x,
+	.mmu_destroy  = kvmppc_mmu_destroy_44x,
+	.init_vm = kvmppc_core_init_vm_44x,
+	.destroy_vm = kvmppc_core_destroy_vm_44x,
+	.emulate_op = kvmppc_core_emulate_op_44x,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
+};
+
 static int __init kvmppc_44x_init(void)
 {
 	int r;
 
 	r = kvmppc_booke_init();
 	if (r)
-		return r;
+		goto err_out;
+
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_44x.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_44x;
 
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+err_out:
+	return r;
 }
 
 static void __exit kvmppc_44x_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
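
Collecting the 44x callbacks into a kvmppc_ops table and publishing it through kvmppc_pr_ops converts the old link-time binding into runtime dispatch, which is what allows HV and PR backends to coexist as modules. Presumably the generic code then calls through the registered table roughly like this (the exact field placement on struct kvm is an assumption here):

/* Hypothetical sketch of the dispatch side: generic PPC KVM code calls
 * through the ops table registered by the backend module instead of a
 * fixed symbol. */
#include <linux/kvm_host.h>

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}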
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 35ec0a8547da..92c9ab4bcfec 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -91,8 +91,8 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
 	return EMULATE_DONE;
 }
 
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			   unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int dcrn = get_dcrn(inst);
@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
 
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ed0385448148..0deef1082e02 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
 	trace_kvm_stlb_inval(stlb_index);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	int i;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index ffaef2cb101a..8aeeda1ff42a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -34,17 +34,20 @@ config KVM_BOOK3S_64_HANDLER
 	bool
 	select KVM_BOOK3S_HANDLER
 
-config KVM_BOOK3S_PR
+config KVM_BOOK3S_PR_POSSIBLE
 	bool
 	select KVM_MMIO
 	select MMU_NOTIFIER
 
+config KVM_BOOK3S_HV_POSSIBLE
+	bool
+
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
 	depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
 	select KVM
 	select KVM_BOOK3S_32_HANDLER
-	select KVM_BOOK3S_PR
+	select KVM_BOOK3S_PR_POSSIBLE
 	---help---
 	  Support running unmodified book3s_32 guest kernels
 	  in virtual machines on book3s_32 host processors.
@@ -59,6 +62,7 @@ config KVM_BOOK3S_64
59 depends on PPC_BOOK3S_64 62 depends on PPC_BOOK3S_64
60 select KVM_BOOK3S_64_HANDLER 63 select KVM_BOOK3S_64_HANDLER
61 select KVM 64 select KVM
65 select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
62 ---help--- 66 ---help---
63 Support running unmodified book3s_64 and book3s_32 guest kernels 67 Support running unmodified book3s_64 and book3s_32 guest kernels
64 in virtual machines on book3s_64 host processors. 68 in virtual machines on book3s_64 host processors.
@@ -69,8 +73,9 @@ config KVM_BOOK3S_64
69 If unsure, say N. 73 If unsure, say N.
70 74
71config KVM_BOOK3S_64_HV 75config KVM_BOOK3S_64_HV
72 bool "KVM support for POWER7 and PPC970 using hypervisor mode in host" 76 tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
73 depends on KVM_BOOK3S_64 77 depends on KVM_BOOK3S_64
78 select KVM_BOOK3S_HV_POSSIBLE
74 select MMU_NOTIFIER 79 select MMU_NOTIFIER
75 select CMA 80 select CMA
76 ---help--- 81 ---help---
@@ -89,9 +94,20 @@ config KVM_BOOK3S_64_HV
89 If unsure, say N. 94 If unsure, say N.
90 95
91config KVM_BOOK3S_64_PR 96config KVM_BOOK3S_64_PR
92 def_bool y 97 tristate "KVM support without using hypervisor mode in host"
93 depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV 98 depends on KVM_BOOK3S_64
94 select KVM_BOOK3S_PR 99 select KVM_BOOK3S_PR_POSSIBLE
100 ---help---
101 Support running guest kernels in virtual machines on processors
102 without using hypervisor mode in the host, by running the
103 guest in user mode (problem state) and emulating all
104 privileged instructions and registers.
105
106 This is not as fast as using hypervisor mode, but works on
107 machines where hypervisor mode is not available or not usable,
108 and can emulate processors that are different from the host
109 processor, including emulating 32-bit processors on a 64-bit
110 host.
95 111
96config KVM_BOOKE_HV 112config KVM_BOOKE_HV
97 bool 113 bool
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 6646c952c5e3..ce569b6bf4d8 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -53,41 +53,51 @@ kvm-e500mc-objs := \
53 e500_emulate.o 53 e500_emulate.o
54kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) 54kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
55 55
56kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ 56kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
57 $(KVM)/coalesced_mmio.o \ 57 book3s_64_vio_hv.o
58
59kvm-pr-y := \
58 fpu.o \ 60 fpu.o \
59 book3s_paired_singles.o \ 61 book3s_paired_singles.o \
60 book3s_pr.o \ 62 book3s_pr.o \
61 book3s_pr_papr.o \ 63 book3s_pr_papr.o \
62 book3s_64_vio_hv.o \
63 book3s_emulate.o \ 64 book3s_emulate.o \
64 book3s_interrupts.o \ 65 book3s_interrupts.o \
65 book3s_mmu_hpte.o \ 66 book3s_mmu_hpte.o \
66 book3s_64_mmu_host.o \ 67 book3s_64_mmu_host.o \
67 book3s_64_mmu.o \ 68 book3s_64_mmu.o \
68 book3s_32_mmu.o 69 book3s_32_mmu.o
69kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ 70
71ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
72kvm-book3s_64-module-objs := \
73 $(KVM)/coalesced_mmio.o
74
75kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
70 book3s_rmhandlers.o 76 book3s_rmhandlers.o
77endif
71 78
72kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \ 79kvm-hv-y += \
73 book3s_hv.o \ 80 book3s_hv.o \
74 book3s_hv_interrupts.o \ 81 book3s_hv_interrupts.o \
75 book3s_64_mmu_hv.o 82 book3s_64_mmu_hv.o
83
76kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \ 84kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
77 book3s_hv_rm_xics.o 85 book3s_hv_rm_xics.o
78kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \ 86
87ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
88kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
79 book3s_hv_rmhandlers.o \ 89 book3s_hv_rmhandlers.o \
80 book3s_hv_rm_mmu.o \ 90 book3s_hv_rm_mmu.o \
81 book3s_64_vio_hv.o \
82 book3s_hv_ras.o \ 91 book3s_hv_ras.o \
83 book3s_hv_builtin.o \ 92 book3s_hv_builtin.o \
84 book3s_hv_cma.o \ 93 book3s_hv_cma.o \
85 $(kvm-book3s_64-builtin-xics-objs-y) 94 $(kvm-book3s_64-builtin-xics-objs-y)
95endif
86 96
87kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ 97kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
88 book3s_xics.o 98 book3s_xics.o
89 99
90kvm-book3s_64-module-objs := \ 100kvm-book3s_64-module-objs += \
91 $(KVM)/kvm_main.o \ 101 $(KVM)/kvm_main.o \
92 $(KVM)/eventfd.o \ 102 $(KVM)/eventfd.o \
93 powerpc.o \ 103 powerpc.o \
@@ -123,4 +133,7 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o
123obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o 133obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
124obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o 134obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
125 135
136obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
137obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
138
126obj-y += $(kvm-book3s_64-builtin-objs-y) 139obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 700df6f1d32c..8912608b7e1b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
34#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
35#include <linux/highmem.h> 35#include <linux/highmem.h>
36 36
37#include "book3s.h"
37#include "trace.h" 38#include "trace.h"
38 39
39#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 40#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -69,6 +70,50 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
69{ 70{
70} 71}
71 72
73static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
74{
75 if (!is_kvmppc_hv_enabled(vcpu->kvm))
76 return to_book3s(vcpu)->hior;
77 return 0;
78}
79
80static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
81 unsigned long pending_now, unsigned long old_pending)
82{
83 if (is_kvmppc_hv_enabled(vcpu->kvm))
84 return;
85 if (pending_now)
86 vcpu->arch.shared->int_pending = 1;
87 else if (old_pending)
88 vcpu->arch.shared->int_pending = 0;
89}
90
91static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
92{
93 ulong crit_raw;
94 ulong crit_r1;
95 bool crit;
96
97 if (is_kvmppc_hv_enabled(vcpu->kvm))
98 return false;
99
100 crit_raw = vcpu->arch.shared->critical;
101 crit_r1 = kvmppc_get_gpr(vcpu, 1);
102
103 /* Truncate crit indicators in 32 bit mode */
104 if (!(vcpu->arch.shared->msr & MSR_SF)) {
105 crit_raw &= 0xffffffff;
106 crit_r1 &= 0xffffffff;
107 }
108
109 /* Critical section when crit == r1 */
110 crit = (crit_raw == crit_r1);
111 /* ... and we're in supervisor mode */
112 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
113
114 return crit;
115}
116
72void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) 117void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
73{ 118{
74 vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); 119 vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
@@ -126,28 +171,32 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
126 printk(KERN_INFO "Queueing interrupt %x\n", vec); 171 printk(KERN_INFO "Queueing interrupt %x\n", vec);
127#endif 172#endif
128} 173}
129 174EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
130 175
131void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) 176void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
132{ 177{
133 /* might as well deliver this straight away */ 178 /* might as well deliver this straight away */
134 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags); 179 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
135} 180}
181EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
136 182
137void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) 183void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
138{ 184{
139 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); 185 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
140} 186}
187EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
141 188
142int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) 189int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
143{ 190{
144 return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); 191 return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
145} 192}
193EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
146 194
147void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) 195void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
148{ 196{
149 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); 197 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
150} 198}
199EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
151 200
152void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 201void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
153 struct kvm_interrupt *irq) 202 struct kvm_interrupt *irq)
@@ -285,8 +334,10 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
285 334
286 return 0; 335 return 0;
287} 336}
337EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
288 338
289pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 339pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
340 bool *writable)
290{ 341{
291 ulong mp_pa = vcpu->arch.magic_page_pa; 342 ulong mp_pa = vcpu->arch.magic_page_pa;
292 343
@@ -302,20 +353,23 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
302 353
303 pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; 354 pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
304 get_page(pfn_to_page(pfn)); 355 get_page(pfn_to_page(pfn));
356 if (writable)
357 *writable = true;
305 return pfn; 358 return pfn;
306 } 359 }
307 360
308 return gfn_to_pfn(vcpu->kvm, gfn); 361 return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
309} 362}
363EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
310 364
311static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, 365static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
312 struct kvmppc_pte *pte) 366 bool iswrite, struct kvmppc_pte *pte)
313{ 367{
314 int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); 368 int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
315 int r; 369 int r;
316 370
317 if (relocated) { 371 if (relocated) {
318 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); 372 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
319 } else { 373 } else {
320 pte->eaddr = eaddr; 374 pte->eaddr = eaddr;
321 pte->raddr = eaddr & KVM_PAM; 375 pte->raddr = eaddr & KVM_PAM;
@@ -361,7 +415,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
361 415
362 vcpu->stat.st++; 416 vcpu->stat.st++;
363 417
364 if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) 418 if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
365 return -ENOENT; 419 return -ENOENT;
366 420
367 *eaddr = pte.raddr; 421 *eaddr = pte.raddr;
@@ -374,6 +428,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
374 428
375 return EMULATE_DONE; 429 return EMULATE_DONE;
376} 430}
431EXPORT_SYMBOL_GPL(kvmppc_st);
377 432
378int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, 433int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
379 bool data) 434 bool data)
@@ -383,7 +438,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
383 438
384 vcpu->stat.ld++; 439 vcpu->stat.ld++;
385 440
386 if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) 441 if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
387 goto nopte; 442 goto nopte;
388 443
389 *eaddr = pte.raddr; 444 *eaddr = pte.raddr;
@@ -404,6 +459,7 @@ nopte:
404mmio: 459mmio:
405 return EMULATE_DO_MMIO; 460 return EMULATE_DO_MMIO;
406} 461}
462EXPORT_SYMBOL_GPL(kvmppc_ld);
407 463
408int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 464int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
409{ 465{
@@ -419,6 +475,18 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
419{ 475{
420} 476}
421 477
478int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
479 struct kvm_sregs *sregs)
480{
481 return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
482}
483
484int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
485 struct kvm_sregs *sregs)
486{
487 return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
488}
489
422int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 490int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
423{ 491{
424 int i; 492 int i;
@@ -495,8 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
495 if (size > sizeof(val)) 563 if (size > sizeof(val))
496 return -EINVAL; 564 return -EINVAL;
497 565
498 r = kvmppc_get_one_reg(vcpu, reg->id, &val); 566 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
499
500 if (r == -EINVAL) { 567 if (r == -EINVAL) {
501 r = 0; 568 r = 0;
502 switch (reg->id) { 569 switch (reg->id) {
@@ -528,6 +595,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
528 } 595 }
529 val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]); 596 val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
530 break; 597 break;
598 case KVM_REG_PPC_VRSAVE:
599 val = get_reg_val(reg->id, vcpu->arch.vrsave);
600 break;
531#endif /* CONFIG_ALTIVEC */ 601#endif /* CONFIG_ALTIVEC */
532 case KVM_REG_PPC_DEBUG_INST: { 602 case KVM_REG_PPC_DEBUG_INST: {
533 u32 opcode = INS_TW; 603 u32 opcode = INS_TW;
@@ -572,8 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
572 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) 642 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
573 return -EFAULT; 643 return -EFAULT;
574 644
575 r = kvmppc_set_one_reg(vcpu, reg->id, &val); 645 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
576
577 if (r == -EINVAL) { 646 if (r == -EINVAL) {
578 r = 0; 647 r = 0;
579 switch (reg->id) { 648 switch (reg->id) {
@@ -605,6 +674,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
605 } 674 }
606 vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val); 675 vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
607 break; 676 break;
677 case KVM_REG_PPC_VRSAVE:
678 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
679 r = -ENXIO;
680 break;
681 }
682 vcpu->arch.vrsave = set_reg_val(reg->id, val);
683 break;
608#endif /* CONFIG_ALTIVEC */ 684#endif /* CONFIG_ALTIVEC */
609#ifdef CONFIG_KVM_XICS 685#ifdef CONFIG_KVM_XICS
610 case KVM_REG_PPC_ICP_STATE: 686 case KVM_REG_PPC_ICP_STATE:
@@ -625,6 +701,27 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
625 return r; 701 return r;
626} 702}
627 703
704void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
705{
706 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
707}
708
709void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
710{
711 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
712}
713
714void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
715{
716 vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
717}
718EXPORT_SYMBOL_GPL(kvmppc_set_msr);
719
720int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
721{
722 return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
723}
724
628int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 725int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
629 struct kvm_translation *tr) 726 struct kvm_translation *tr)
630{ 727{
@@ -644,3 +741,141 @@ void kvmppc_decrementer_func(unsigned long data)
644 kvmppc_core_queue_dec(vcpu); 741 kvmppc_core_queue_dec(vcpu);
645 kvm_vcpu_kick(vcpu); 742 kvm_vcpu_kick(vcpu);
646} 743}
744
745struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
746{
747 return kvm->arch.kvm_ops->vcpu_create(kvm, id);
748}
749
750void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
751{
752 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
753}
754
755int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
756{
757 return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
758}
759
760int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
761{
762 return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
763}
764
765void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
766 struct kvm_memory_slot *dont)
767{
768 kvm->arch.kvm_ops->free_memslot(free, dont);
769}
770
771int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
772 unsigned long npages)
773{
774 return kvm->arch.kvm_ops->create_memslot(slot, npages);
775}
776
777void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
778{
779 kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
780}
781
782int kvmppc_core_prepare_memory_region(struct kvm *kvm,
783 struct kvm_memory_slot *memslot,
784 struct kvm_userspace_memory_region *mem)
785{
786 return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
787}
788
789void kvmppc_core_commit_memory_region(struct kvm *kvm,
790 struct kvm_userspace_memory_region *mem,
791 const struct kvm_memory_slot *old)
792{
793 kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
794}
795
796int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
797{
798 return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
799}
800EXPORT_SYMBOL_GPL(kvm_unmap_hva);
801
802int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
803{
804 return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
805}
806
807int kvm_age_hva(struct kvm *kvm, unsigned long hva)
808{
809 return kvm->arch.kvm_ops->age_hva(kvm, hva);
810}
811
812int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
813{
814 return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
815}
816
817void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
818{
819 kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
820}
821
822void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
823{
824 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
825}
826
827int kvmppc_core_init_vm(struct kvm *kvm)
828{
829
830#ifdef CONFIG_PPC64
831 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
832 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
833#endif
834
835 return kvm->arch.kvm_ops->init_vm(kvm);
836}
837
838void kvmppc_core_destroy_vm(struct kvm *kvm)
839{
840 kvm->arch.kvm_ops->destroy_vm(kvm);
841
842#ifdef CONFIG_PPC64
843 kvmppc_rtas_tokens_free(kvm);
844 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
845#endif
846}
847
848int kvmppc_core_check_processor_compat(void)
849{
850 /*
851 * We always return 0 for book3s. We check
852 * for compatability while loading the HV
853 * or PR module
854 */
855 return 0;
856}
857
858static int kvmppc_book3s_init(void)
859{
860 int r;
861
862 r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
863 if (r)
864 return r;
865#ifdef CONFIG_KVM_BOOK3S_32
866 r = kvmppc_book3s_init_pr();
867#endif
868 return r;
869
870}
871
872static void kvmppc_book3s_exit(void)
873{
874#ifdef CONFIG_KVM_BOOK3S_32
875 kvmppc_book3s_exit_pr();
876#endif
877 kvm_exit();
878}
879
880module_init(kvmppc_book3s_init);
881module_exit(kvmppc_book3s_exit);
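book3s.c now funnels every arch hook through kvm->arch.kvm_ops, so kvm-pr.ko and kvm-hv.ko can each supply their own implementation behind one stable interface. A condensed sketch of the vtable these wrappers imply; the field list is abridged to hooks that appear in this hunk, and the real struct carries more members:

	struct kvmppc_ops {
		struct module *owner;	/* assumed, for module refcounting */
		int  (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
		int  (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
		void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
		void (*vcpu_put)(struct kvm_vcpu *vcpu);
		void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
		int  (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
		struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
		void (*vcpu_free)(struct kvm_vcpu *vcpu);
		int  (*unmap_hva)(struct kvm *kvm, unsigned long hva);
		int  (*init_vm)(struct kvm *kvm);
		void (*destroy_vm)(struct kvm *kvm);
		/* ... one_reg, memslot and remaining MMU-notifier hooks elided ... */
	};

	/* each wrapper above is then a one-line indirection, e.g.: */
	void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
	{
		vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
	}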
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
new file mode 100644
index 000000000000..4bf956cf94d6
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright IBM Corporation, 2013
3 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; either version 2 of the
8 * License, or (at your option) any later version of the license.
9 *
10 */
11
12#ifndef __POWERPC_KVM_BOOK3S_H__
13#define __POWERPC_KVM_BOOK3S_H__
14
15extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
16 struct kvm_memory_slot *memslot);
17extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
18extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
19 unsigned long end);
20extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
21extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
22extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
23
24extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
25extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
26 unsigned int inst, int *advance);
27extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
28 int sprn, ulong spr_val);
29extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
30 int sprn, ulong *spr_val);
31extern int kvmppc_book3s_init_pr(void);
32extern void kvmppc_book3s_exit_pr(void);
33
34#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index c8cefdd15fd8..76a64ce6a5b6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
84} 84}
85 85
86static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, 86static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
87 struct kvmppc_pte *pte, bool data); 87 struct kvmppc_pte *pte, bool data,
88 bool iswrite);
88static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, 89static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
89 u64 *vsid); 90 u64 *vsid);
90 91
@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
99 u64 vsid; 100 u64 vsid;
100 struct kvmppc_pte pte; 101 struct kvmppc_pte pte;
101 102
102 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) 103 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
103 return pte.vpage; 104 return pte.vpage;
104 105
105 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); 106 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
111 kvmppc_set_msr(vcpu, 0); 112 kvmppc_set_msr(vcpu, 0);
112} 113}
113 114
114static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, 115static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
115 u32 sre, gva_t eaddr, 116 u32 sre, gva_t eaddr,
116 bool primary) 117 bool primary)
117{ 118{
119 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
118 u32 page, hash, pteg, htabmask; 120 u32 page, hash, pteg, htabmask;
119 hva_t r; 121 hva_t r;
120 122
@@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
132 kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg, 134 kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
133 sr_vsid(sre)); 135 sr_vsid(sre));
134 136
135 r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); 137 r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
136 if (kvm_is_error_hva(r)) 138 if (kvm_is_error_hva(r))
137 return r; 139 return r;
138 return r | (pteg & ~PAGE_MASK); 140 return r | (pteg & ~PAGE_MASK);
@@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
145} 147}
146 148
147static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, 149static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
148 struct kvmppc_pte *pte, bool data) 150 struct kvmppc_pte *pte, bool data,
151 bool iswrite)
149{ 152{
150 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 153 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
151 struct kvmppc_bat *bat; 154 struct kvmppc_bat *bat;
@@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
186 printk(KERN_INFO "BAT is not readable!\n"); 189 printk(KERN_INFO "BAT is not readable!\n");
187 continue; 190 continue;
188 } 191 }
189 if (!pte->may_write) { 192 if (iswrite && !pte->may_write) {
190 /* let's treat r/o BATs as not-readable for now */
191 dprintk_pte("BAT is read-only!\n"); 193 dprintk_pte("BAT is read-only!\n");
192 continue; 194 continue;
193 } 195 }
@@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
201 203
202static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, 204static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
203 struct kvmppc_pte *pte, bool data, 205 struct kvmppc_pte *pte, bool data,
204 bool primary) 206 bool iswrite, bool primary)
205{ 207{
206 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
207 u32 sre; 208 u32 sre;
208 hva_t ptegp; 209 hva_t ptegp;
209 u32 pteg[16]; 210 u32 pteg[16];
@@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
218 219
219 pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); 220 pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
220 221
221 ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary); 222 ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
222 if (kvm_is_error_hva(ptegp)) { 223 if (kvm_is_error_hva(ptegp)) {
223 printk(KERN_INFO "KVM: Invalid PTEG!\n"); 224 printk(KERN_INFO "KVM: Invalid PTEG!\n");
224 goto no_page_found; 225 goto no_page_found;
@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
258 break; 259 break;
259 } 260 }
260 261
261 if ( !pte->may_read )
262 continue;
263
264 dprintk_pte("MMU: Found PTE -> %x %x - %x\n", 262 dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
265 pteg[i], pteg[i+1], pp); 263 pteg[i], pteg[i+1], pp);
266 found = 1; 264 found = 1;
@@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
271 /* Update PTE C and A bits, so the guest's swapper knows we used the 269 /* Update PTE C and A bits, so the guest's swapper knows we used the
272 page */ 270 page */
273 if (found) { 271 if (found) {
274 u32 oldpte = pteg[i+1]; 272 u32 pte_r = pteg[i+1];
275 273 char __user *addr = (char __user *) &pteg[i+1];
276 if (pte->may_read) 274
277 pteg[i+1] |= PTEG_FLAG_ACCESSED; 275 /*
278 if (pte->may_write) 276 * Use single-byte writes to update the HPTE, to
279 pteg[i+1] |= PTEG_FLAG_DIRTY; 277 * conform to what real hardware does.
280 else 278 */
281 dprintk_pte("KVM: Mapping read-only page!\n"); 279 if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
282 280 pte_r |= PTEG_FLAG_ACCESSED;
283 /* Write back into the PTEG */ 281 put_user(pte_r >> 8, addr + 2);
284 if (pteg[i+1] != oldpte) 282 }
285 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); 283 if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
286 284 pte_r |= PTEG_FLAG_DIRTY;
285 put_user(pte_r, addr + 3);
286 }
287 if (!pte->may_read || (iswrite && !pte->may_write))
288 return -EPERM;
287 return 0; 289 return 0;
288 } 290 }
289 291
@@ -302,12 +304,14 @@ no_page_found:
302} 304}
303 305
304static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, 306static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
305 struct kvmppc_pte *pte, bool data) 307 struct kvmppc_pte *pte, bool data,
308 bool iswrite)
306{ 309{
307 int r; 310 int r;
308 ulong mp_ea = vcpu->arch.magic_page_ea; 311 ulong mp_ea = vcpu->arch.magic_page_ea;
309 312
310 pte->eaddr = eaddr; 313 pte->eaddr = eaddr;
314 pte->page_size = MMU_PAGE_4K;
311 315
312 /* Magic page override */ 316 /* Magic page override */
313 if (unlikely(mp_ea) && 317 if (unlikely(mp_ea) &&
@@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
323 return 0; 327 return 0;
324 } 328 }
325 329
326 r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); 330 r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
327 if (r < 0) 331 if (r < 0)
328 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); 332 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
333 data, iswrite, true);
329 if (r < 0) 334 if (r < 0)
330 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false); 335 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
336 data, iswrite, false);
331 337
332 return r; 338 return r;
333} 339}
@@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
347 353
348static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) 354static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
349{ 355{
350 kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000); 356 int i;
357 struct kvm_vcpu *v;
358
359 /* flush this VA on all cpus */
360 kvm_for_each_vcpu(i, v, vcpu->kvm)
361 kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
351} 362}
352 363
353static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, 364static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
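The xlate_pte rework above replaces a whole-PTEG copy_to_user() with two single-byte put_user() calls, matching what real hardware does when it sets R and C. The byte offsets follow from big-endian layout: byte k of a u32 holds bits (31 - 8k) down to (24 - 8k), and assuming the usual flag values PTEG_FLAG_ACCESSED = 0x100 (bit 8, byte 2) and PTEG_FLAG_DIRTY = 0x080 (bit 7, byte 3), a sketch of the arithmetic:

	/* sketch: put_user() through a char pointer stores exactly one byte */
	static inline void hpte_put_byte(u32 pte_r, char __user *word, int k)
	{
		put_user(pte_r >> (8 * (3 - k)), word + k);
	}

	/* the hunk above is then equivalent to:
	 *   hpte_put_byte(pte_r, addr, 2);	set R without touching C
	 *   hpte_put_byte(pte_r, addr, 3);	set C without touching R
	 */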
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 00e619bf608e..3a0abd2e5a15 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
138 138
139extern char etext[]; 139extern char etext[];
140 140
141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
142 bool iswrite)
142{ 143{
143 pfn_t hpaddr; 144 pfn_t hpaddr;
144 u64 vpn; 145 u64 vpn;
@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
152 bool evict = false; 153 bool evict = false;
153 struct hpte_cache *pte; 154 struct hpte_cache *pte;
154 int r = 0; 155 int r = 0;
156 bool writable;
155 157
156 /* Get host physical address for gpa */ 158 /* Get host physical address for gpa */
157 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 159 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
160 iswrite, &writable);
158 if (is_error_noslot_pfn(hpaddr)) { 161 if (is_error_noslot_pfn(hpaddr)) {
159 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", 162 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
160 orig_pte->eaddr); 163 orig_pte->eaddr);
@@ -204,7 +207,7 @@ next_pteg:
204 (primary ? 0 : PTE_SEC); 207 (primary ? 0 : PTE_SEC);
205 pteg1 = hpaddr | PTE_M | PTE_R | PTE_C; 208 pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
206 209
207 if (orig_pte->may_write) { 210 if (orig_pte->may_write && writable) {
208 pteg1 |= PP_RWRW; 211 pteg1 |= PP_RWRW;
209 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 212 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
210 } else { 213 } else {
@@ -259,6 +262,11 @@ out:
259 return r; 262 return r;
260} 263}
261 264
265void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
266{
267 kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
268}
269
262static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) 270static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
263{ 271{
264 struct kvmppc_sid_map *map; 272 struct kvmppc_sid_map *map;
@@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
341 svcpu_put(svcpu); 349 svcpu_put(svcpu);
342} 350}
343 351
344void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 352void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
345{ 353{
346 int i; 354 int i;
347 355
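A consequence of the new writable flag worth spelling out: a page the guest may write but whose host mapping is currently read-only gets entered read-only, and the guest's eventual store simply faults again, at which point gfn_to_pfn_prot() can supply a writable mapping. Sketch of the protection choice (PP_RWRX in the else branch is an assumption; the hunk cuts off before it):

	if (orig_pte->may_write && writable) {
		pteg1 |= PP_RWRW;	/* guest-writable and host page writable */
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;	/* read-only for now; a store refaults */
	}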
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 7e345e00661a..83da1f868fd5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
107 return kvmppc_slb_calc_vpn(slb, eaddr); 107 return kvmppc_slb_calc_vpn(slb, eaddr);
108} 108}
109 109
110static int mmu_pagesize(int mmu_pg)
111{
112 switch (mmu_pg) {
113 case MMU_PAGE_64K:
114 return 16;
115 case MMU_PAGE_16M:
116 return 24;
117 }
118 return 12;
119}
120
110static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) 121static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
111{ 122{
112 return slbe->large ? 24 : 12; 123 return mmu_pagesize(slbe->base_page_size);
113} 124}
114 125
115static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) 126static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
@@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
119 return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p); 130 return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
120} 131}
121 132
122static hva_t kvmppc_mmu_book3s_64_get_pteg( 133static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
123 struct kvmppc_vcpu_book3s *vcpu_book3s,
124 struct kvmppc_slb *slbe, gva_t eaddr, 134 struct kvmppc_slb *slbe, gva_t eaddr,
125 bool second) 135 bool second)
126{ 136{
137 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
127 u64 hash, pteg, htabsize; 138 u64 hash, pteg, htabsize;
128 u32 ssize; 139 u32 ssize;
129 hva_t r; 140 hva_t r;
@@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
148 159
149 /* When running a PAPR guest, SDR1 contains a HVA address instead 160 /* When running a PAPR guest, SDR1 contains a HVA address instead
150 of a GPA */ 161 of a GPA */
151 if (vcpu_book3s->vcpu.arch.papr_enabled) 162 if (vcpu->arch.papr_enabled)
152 r = pteg; 163 r = pteg;
153 else 164 else
154 r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); 165 r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
155 166
156 if (kvm_is_error_hva(r)) 167 if (kvm_is_error_hva(r))
157 return r; 168 return r;
@@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
166 avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); 177 avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
167 avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p); 178 avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
168 179
169 if (p < 24) 180 if (p < 16)
170 avpn >>= ((80 - p) - 56) - 8; 181 avpn >>= ((80 - p) - 56) - 8; /* 16 - p */
171 else 182 else
172 avpn <<= 8; 183 avpn <<= p - 16;
173 184
174 return avpn; 185 return avpn;
175} 186}
176 187
188/*
189 * Return page size encoded in the second word of a HPTE, or
190 * -1 for an invalid encoding for the base page size indicated by
191 * the SLB entry. This doesn't handle mixed pagesize segments yet.
192 */
193static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
194{
195 switch (slbe->base_page_size) {
196 case MMU_PAGE_64K:
197 if ((r & 0xf000) == 0x1000)
198 return MMU_PAGE_64K;
199 break;
200 case MMU_PAGE_16M:
201 if ((r & 0xff000) == 0)
202 return MMU_PAGE_16M;
203 break;
204 }
205 return -1;
206}
207
177static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, 208static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
178 struct kvmppc_pte *gpte, bool data) 209 struct kvmppc_pte *gpte, bool data,
210 bool iswrite)
179{ 211{
180 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
181 struct kvmppc_slb *slbe; 212 struct kvmppc_slb *slbe;
182 hva_t ptegp; 213 hva_t ptegp;
183 u64 pteg[16]; 214 u64 pteg[16];
@@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
189 u8 pp, key = 0; 220 u8 pp, key = 0;
190 bool found = false; 221 bool found = false;
191 bool second = false; 222 bool second = false;
223 int pgsize;
192 ulong mp_ea = vcpu->arch.magic_page_ea; 224 ulong mp_ea = vcpu->arch.magic_page_ea;
193 225
194 /* Magic page override */ 226 /* Magic page override */
@@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
202 gpte->may_execute = true; 234 gpte->may_execute = true;
203 gpte->may_read = true; 235 gpte->may_read = true;
204 gpte->may_write = true; 236 gpte->may_write = true;
237 gpte->page_size = MMU_PAGE_4K;
205 238
206 return 0; 239 return 0;
207 } 240 }
@@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
222 v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID | 255 v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
223 HPTE_V_SECONDARY; 256 HPTE_V_SECONDARY;
224 257
258 pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
259
260 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
261
225do_second: 262do_second:
226 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); 263 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
227 if (kvm_is_error_hva(ptegp)) 264 if (kvm_is_error_hva(ptegp))
228 goto no_page_found; 265 goto no_page_found;
229 266
@@ -240,6 +277,13 @@ do_second:
240 for (i=0; i<16; i+=2) { 277 for (i=0; i<16; i+=2) {
241 /* Check all relevant fields of 1st dword */ 278 /* Check all relevant fields of 1st dword */
242 if ((pteg[i] & v_mask) == v_val) { 279 if ((pteg[i] & v_mask) == v_val) {
280 /* If large page bit is set, check pgsize encoding */
281 if (slbe->large &&
282 (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
283 pgsize = decode_pagesize(slbe, pteg[i+1]);
284 if (pgsize < 0)
285 continue;
286 }
243 found = true; 287 found = true;
244 break; 288 break;
245 } 289 }
@@ -256,13 +300,15 @@ do_second:
256 v = pteg[i]; 300 v = pteg[i];
257 r = pteg[i+1]; 301 r = pteg[i+1];
258 pp = (r & HPTE_R_PP) | key; 302 pp = (r & HPTE_R_PP) | key;
259 eaddr_mask = 0xFFF; 303 if (r & HPTE_R_PP0)
304 pp |= 8;
260 305
261 gpte->eaddr = eaddr; 306 gpte->eaddr = eaddr;
262 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); 307 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
263 if (slbe->large) 308
264 eaddr_mask = 0xFFFFFF; 309 eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
265 gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask); 310 gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
311 gpte->page_size = pgsize;
266 gpte->may_execute = ((r & HPTE_R_N) ? false : true); 312 gpte->may_execute = ((r & HPTE_R_N) ? false : true);
267 gpte->may_read = false; 313 gpte->may_read = false;
268 gpte->may_write = false; 314 gpte->may_write = false;
@@ -277,6 +323,7 @@ do_second:
277 case 3: 323 case 3:
278 case 5: 324 case 5:
279 case 7: 325 case 7:
326 case 10:
280 gpte->may_read = true; 327 gpte->may_read = true;
281 break; 328 break;
282 } 329 }
@@ -287,30 +334,37 @@ do_second:
287 334
288 /* Update PTE R and C bits, so the guest's swapper knows we used the 335 /* Update PTE R and C bits, so the guest's swapper knows we used the
289 * page */ 336 * page */
290 if (gpte->may_read) { 337 if (gpte->may_read && !(r & HPTE_R_R)) {
291 /* Set the accessed flag */ 338 /*
339 * Set the accessed flag.
340 * We have to write this back with a single byte write
341 * because another vcpu may be accessing this on
342 * non-PAPR platforms such as mac99, and this is
343 * what real hardware does.
344 */
345 char __user *addr = (char __user *) &pteg[i+1];
292 r |= HPTE_R_R; 346 r |= HPTE_R_R;
347 put_user(r >> 8, addr + 6);
293 } 348 }
294 if (data && gpte->may_write) { 349 if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
295 /* Set the dirty flag -- XXX even if not writing */ 350 /* Set the dirty flag */
351 /* Use a single byte write */
352 char __user *addr = (char __user *) &pteg[i+1];
296 r |= HPTE_R_C; 353 r |= HPTE_R_C;
354 put_user(r, addr + 7);
297 } 355 }
298 356
299 /* Write back into the PTEG */ 357 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
300 if (pteg[i+1] != r) {
301 pteg[i+1] = r;
302 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
303 }
304 358
305 if (!gpte->may_read) 359 if (!gpte->may_read || (iswrite && !gpte->may_write))
306 return -EPERM; 360 return -EPERM;
307 return 0; 361 return 0;
308 362
309no_page_found: 363no_page_found:
364 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
310 return -ENOENT; 365 return -ENOENT;
311 366
312no_seg_found: 367no_seg_found:
313
314 dprintk("KVM MMU: Trigger segment fault\n"); 368 dprintk("KVM MMU: Trigger segment fault\n");
315 return -EINVAL; 369 return -EINVAL;
316} 370}
@@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
345 slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; 399 slbe->nx = (rs & SLB_VSID_N) ? 1 : 0;
346 slbe->class = (rs & SLB_VSID_C) ? 1 : 0; 400 slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
347 401
402 slbe->base_page_size = MMU_PAGE_4K;
403 if (slbe->large) {
404 if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
405 switch (rs & SLB_VSID_LP) {
406 case SLB_VSID_LP_00:
407 slbe->base_page_size = MMU_PAGE_16M;
408 break;
409 case SLB_VSID_LP_01:
410 slbe->base_page_size = MMU_PAGE_64K;
411 break;
412 }
413 } else
414 slbe->base_page_size = MMU_PAGE_16M;
415 }
416
348 slbe->orige = rb & (ESID_MASK | SLB_ESID_V); 417 slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
349 slbe->origv = rs; 418 slbe->origv = rs;
350 419
@@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
460 bool large) 529 bool large)
461{ 530{
462 u64 mask = 0xFFFFFFFFFULL; 531 u64 mask = 0xFFFFFFFFFULL;
532 long i;
533 struct kvm_vcpu *v;
463 534
464 dprintk("KVM MMU: tlbie(0x%lx)\n", va); 535 dprintk("KVM MMU: tlbie(0x%lx)\n", va);
465 536
466 if (large) 537 /*
467 mask = 0xFFFFFF000ULL; 538 * The tlbie instruction changed behaviour starting with
468 kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); 539 * POWER6. POWER6 and later don't have the large page flag
540 * in the instruction but in the RB value, along with bits
541 * indicating page and segment sizes.
542 */
543 if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
544 /* POWER6 or later */
545 if (va & 1) { /* L bit */
546 if ((va & 0xf000) == 0x1000)
547 mask = 0xFFFFFFFF0ULL; /* 64k page */
548 else
549 mask = 0xFFFFFF000ULL; /* 16M page */
550 }
551 } else {
552 /* older processors, e.g. PPC970 */
553 if (large)
554 mask = 0xFFFFFF000ULL;
555 }
556 /* flush this VA on all vcpus */
557 kvm_for_each_vcpu(i, v, vcpu->kvm)
558 kvmppc_mmu_pte_vflush(v, va >> 12, mask);
469} 559}
470 560
561#ifdef CONFIG_PPC_64K_PAGES
562static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
563{
564 ulong mp_ea = vcpu->arch.magic_page_ea;
565
566 return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
567 (mp_ea >> SID_SHIFT) == esid;
568}
569#endif
570
471static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, 571static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
472 u64 *vsid) 572 u64 *vsid)
473{ 573{
@@ -475,11 +575,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
475 struct kvmppc_slb *slb; 575 struct kvmppc_slb *slb;
476 u64 gvsid = esid; 576 u64 gvsid = esid;
477 ulong mp_ea = vcpu->arch.magic_page_ea; 577 ulong mp_ea = vcpu->arch.magic_page_ea;
578 int pagesize = MMU_PAGE_64K;
478 579
479 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 580 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
480 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); 581 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
481 if (slb) { 582 if (slb) {
482 gvsid = slb->vsid; 583 gvsid = slb->vsid;
584 pagesize = slb->base_page_size;
483 if (slb->tb) { 585 if (slb->tb) {
484 gvsid <<= SID_SHIFT_1T - SID_SHIFT; 586 gvsid <<= SID_SHIFT_1T - SID_SHIFT;
485 gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1); 587 gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
@@ -490,28 +592,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
490 592
491 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 593 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
492 case 0: 594 case 0:
493 *vsid = VSID_REAL | esid; 595 gvsid = VSID_REAL | esid;
494 break; 596 break;
495 case MSR_IR: 597 case MSR_IR:
496 *vsid = VSID_REAL_IR | gvsid; 598 gvsid |= VSID_REAL_IR;
497 break; 599 break;
498 case MSR_DR: 600 case MSR_DR:
499 *vsid = VSID_REAL_DR | gvsid; 601 gvsid |= VSID_REAL_DR;
500 break; 602 break;
501 case MSR_DR|MSR_IR: 603 case MSR_DR|MSR_IR:
502 if (!slb) 604 if (!slb)
503 goto no_slb; 605 goto no_slb;
504 606
505 *vsid = gvsid;
506 break; 607 break;
507 default: 608 default:
508 BUG(); 609 BUG();
509 break; 610 break;
510 } 611 }
511 612
613#ifdef CONFIG_PPC_64K_PAGES
614 /*
615 * Mark this as a 64k segment if the host is using
616 * 64k pages, the host MMU supports 64k pages and
617 * the guest segment page size is >= 64k,
618 * but not if this segment contains the magic page.
619 */
620 if (pagesize >= MMU_PAGE_64K &&
621 mmu_psize_defs[MMU_PAGE_64K].shift &&
622 !segment_contains_magic_page(vcpu, esid))
623 gvsid |= VSID_64K;
624#endif
625
512 if (vcpu->arch.shared->msr & MSR_PR) 626 if (vcpu->arch.shared->msr & MSR_PR)
513 *vsid |= VSID_PR; 627 gvsid |= VSID_PR;
514 628
629 *vsid = gvsid;
515 return 0; 630 return 0;
516 631
517no_slb: 632no_slb:
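The tlbie hunk distinguishes the POWER6-and-later encoding, where the page size travels in the RB value, from older CPUs where the large-page flag came from the instruction itself. A sketch of the mask selection it implements (the helper name is hypothetical):

	static u64 tlbie_vpage_mask(struct kvm_vcpu *vcpu, ulong va, bool large)
	{
		if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
			if (!(va & 1))			/* L = 0: 4k page */
				return 0xFFFFFFFFFULL;
			if ((va & 0xf000) == 0x1000)	/* LP says 64k */
				return 0xFFFFFFFF0ULL;	/* ignore low 4 vpage bits */
			return 0xFFFFFF000ULL;		/* 16M page */
		}
		/* PPC970 and earlier: L bit taken from the instruction */
		return large ? 0xFFFFFF000ULL : 0xFFFFFFFFFULL;
	}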
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e5240524bf6c..0d513af62bba 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -27,14 +27,14 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
29#include <asm/hw_irq.h> 29#include <asm/hw_irq.h>
30#include "trace.h" 30#include "trace_pr.h"
31 31
32#define PTE_SIZE 12 32#define PTE_SIZE 12
33 33
34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
35{ 35{
36 ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, 36 ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
37 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M, 37 pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
38 false); 38 false);
39} 39}
40 40
@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
78 return NULL; 78 return NULL;
79} 79}
80 80
81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
82 bool iswrite)
82{ 83{
83 unsigned long vpn; 84 unsigned long vpn;
84 pfn_t hpaddr; 85 pfn_t hpaddr;
@@ -90,16 +91,26 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
90 int attempt = 0; 91 int attempt = 0;
91 struct kvmppc_sid_map *map; 92 struct kvmppc_sid_map *map;
92 int r = 0; 93 int r = 0;
94 int hpsize = MMU_PAGE_4K;
95 bool writable;
96 unsigned long mmu_seq;
97 struct kvm *kvm = vcpu->kvm;
98 struct hpte_cache *cpte;
99 unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
100 unsigned long pfn;
101
102 /* used to check for invalidations in progress */
103 mmu_seq = kvm->mmu_notifier_seq;
104 smp_rmb();
93 105
94 /* Get host physical address for gpa */ 106 /* Get host physical address for gpa */
95 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 107 pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
96 if (is_error_noslot_pfn(hpaddr)) { 108 if (is_error_noslot_pfn(pfn)) {
97 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); 109 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
98 r = -EINVAL; 110 r = -EINVAL;
99 goto out; 111 goto out;
100 } 112 }
101 hpaddr <<= PAGE_SHIFT; 113 hpaddr = pfn << PAGE_SHIFT;
102 hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
103 114
104 /* and write the mapping ea -> hpa into the pt */ 115 /* and write the mapping ea -> hpa into the pt */
105 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); 116 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -117,20 +128,39 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
117 goto out; 128 goto out;
118 } 129 }
119 130
120 vsid = map->host_vsid; 131 vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
121 vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
122 132
123 if (!orig_pte->may_write) 133 kvm_set_pfn_accessed(pfn);
124 rflags |= HPTE_R_PP; 134 if (!orig_pte->may_write || !writable)
125 else 135 rflags |= PP_RXRX;
126 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 136 else {
137 mark_page_dirty(vcpu->kvm, gfn);
138 kvm_set_pfn_dirty(pfn);
139 }
127 140
128 if (!orig_pte->may_execute) 141 if (!orig_pte->may_execute)
129 rflags |= HPTE_R_N; 142 rflags |= HPTE_R_N;
130 else 143 else
131 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); 144 kvmppc_mmu_flush_icache(pfn);
145
146 /*
147 * Use 64K pages if possible; otherwise, on 64K page kernels,
148 * we need to transfer 4 more bits from guest real to host real addr.
149 */
150 if (vsid & VSID_64K)
151 hpsize = MMU_PAGE_64K;
152 else
153 hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
154
155 hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
132 156
133 hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M); 157 cpte = kvmppc_mmu_hpte_cache_next(vcpu);
158
159 spin_lock(&kvm->mmu_lock);
160 if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
161 r = -EAGAIN;
162 goto out_unlock;
163 }
134 164
135map_again: 165map_again:
136 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 166 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -139,11 +169,11 @@ map_again:
139 if (attempt > 1) 169 if (attempt > 1)
140 if (ppc_md.hpte_remove(hpteg) < 0) { 170 if (ppc_md.hpte_remove(hpteg) < 0) {
141 r = -1; 171 r = -1;
142 goto out; 172 goto out_unlock;
143 } 173 }
144 174
145 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, 175 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
146 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M); 176 hpsize, hpsize, MMU_SEGSIZE_256M);
147 177
148 if (ret < 0) { 178 if (ret < 0) {
149 /* If we couldn't map a primary PTE, try a secondary */ 179 /* If we couldn't map a primary PTE, try a secondary */
@@ -152,8 +182,6 @@ map_again:
152 attempt++; 182 attempt++;
153 goto map_again; 183 goto map_again;
154 } else { 184 } else {
155 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
156
157 trace_kvm_book3s_64_mmu_map(rflags, hpteg, 185 trace_kvm_book3s_64_mmu_map(rflags, hpteg,
158 vpn, hpaddr, orig_pte); 186 vpn, hpaddr, orig_pte);
159 187
@@ -164,19 +192,37 @@ map_again:
164 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 192 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
165 } 193 }
166 194
167 pte->slot = hpteg + (ret & 7); 195 cpte->slot = hpteg + (ret & 7);
168 pte->host_vpn = vpn; 196 cpte->host_vpn = vpn;
169 pte->pte = *orig_pte; 197 cpte->pte = *orig_pte;
170 pte->pfn = hpaddr >> PAGE_SHIFT; 198 cpte->pfn = pfn;
199 cpte->pagesize = hpsize;
171 200
172 kvmppc_mmu_hpte_cache_map(vcpu, pte); 201 kvmppc_mmu_hpte_cache_map(vcpu, cpte);
202 cpte = NULL;
173 } 203 }
174 kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); 204
205out_unlock:
206 spin_unlock(&kvm->mmu_lock);
207 kvm_release_pfn_clean(pfn);
208 if (cpte)
209 kvmppc_mmu_hpte_cache_free(cpte);
175 210
176out: 211out:
177 return r; 212 return r;
178} 213}
179 214
215void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
216{
217 u64 mask = 0xfffffffffULL;
218 u64 vsid;
219
220 vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
221 if (vsid & VSID_64K)
222 mask = 0xffffffff0ULL;
223 kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
224}
225
180static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) 226static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
181{ 227{
182 struct kvmppc_sid_map *map; 228 struct kvmppc_sid_map *map;
@@ -291,6 +337,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
291 slb_vsid &= ~SLB_VSID_KP; 337 slb_vsid &= ~SLB_VSID_KP;
292 slb_esid |= slb_index; 338 slb_esid |= slb_index;
293 339
340#ifdef CONFIG_PPC_64K_PAGES
341 /* Set host segment base page size to 64K if possible */
342 if (gvsid & VSID_64K)
343 slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
344#endif
345
294 svcpu->slb[slb_index].esid = slb_esid; 346 svcpu->slb[slb_index].esid = slb_esid;
295 svcpu->slb[slb_index].vsid = slb_vsid; 347 svcpu->slb[slb_index].vsid = slb_vsid;
296 348
@@ -326,7 +378,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
326 svcpu_put(svcpu); 378 svcpu_put(svcpu);
327} 379}
328 380
329void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 381void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
330{ 382{
331 kvmppc_mmu_hpte_destroy(vcpu); 383 kvmppc_mmu_hpte_destroy(vcpu);
332 __destroy_context(to_book3s(vcpu)->context_id[0]); 384 __destroy_context(to_book3s(vcpu)->context_id[0]);
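The map-page rework adopts the standard KVM mmu_notifier race check: sample the sequence count before translating the gfn, then re-check it under mmu_lock before committing the host HPTE. Condensed sketch of that control flow (HPTE insertion and most error handling elided):

	static int sketch_map_page(struct kvm_vcpu *vcpu, unsigned long gfn,
				   bool iswrite)
	{
		struct kvm *kvm = vcpu->kvm;
		unsigned long mmu_seq;
		bool writable;
		pfn_t pfn;
		int r = 0;

		mmu_seq = kvm->mmu_notifier_seq;	/* sample first ... */
		smp_rmb();				/* ... before pinning the page */

		pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
		if (is_error_noslot_pfn(pfn))
			return -EINVAL;

		spin_lock(&kvm->mmu_lock);
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* an invalidation raced with us: drop the pfn, refault */
			r = -EAGAIN;
		} else {
			/* safe: insert the host HPTE and cache it here */
		}
		spin_unlock(&kvm->mmu_lock);

		kvm_release_pfn_clean(pfn);
		return r;
	}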
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 043eec8461e7..f3ff587a8b7d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -260,10 +260,6 @@ int kvmppc_mmu_hv_init(void)
260 return 0; 260 return 0;
261} 261}
262 262
263void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
264{
265}
266
267static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) 263static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
268{ 264{
269 kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); 265 kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
@@ -451,7 +447,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
451} 447}
452 448
453static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, 449static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
454 struct kvmppc_pte *gpte, bool data) 450 struct kvmppc_pte *gpte, bool data, bool iswrite)
455{ 451{
456 struct kvm *kvm = vcpu->kvm; 452 struct kvm *kvm = vcpu->kvm;
457 struct kvmppc_slb *slbe; 453 struct kvmppc_slb *slbe;
@@ -906,21 +902,22 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
906 return 0; 902 return 0;
907} 903}
908 904
909int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 905int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
910{ 906{
911 if (kvm->arch.using_mmu_notifiers) 907 if (kvm->arch.using_mmu_notifiers)
912 kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); 908 kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
913 return 0; 909 return 0;
914} 910}
915 911
916int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 912int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
917{ 913{
918 if (kvm->arch.using_mmu_notifiers) 914 if (kvm->arch.using_mmu_notifiers)
919 kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp); 915 kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
920 return 0; 916 return 0;
921} 917}
922 918
923void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) 919void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
920 struct kvm_memory_slot *memslot)
924{ 921{
925 unsigned long *rmapp; 922 unsigned long *rmapp;
926 unsigned long gfn; 923 unsigned long gfn;
@@ -994,7 +991,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
994 return ret; 991 return ret;
995} 992}
996 993
997int kvm_age_hva(struct kvm *kvm, unsigned long hva) 994int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
998{ 995{
999 if (!kvm->arch.using_mmu_notifiers) 996 if (!kvm->arch.using_mmu_notifiers)
1000 return 0; 997 return 0;
@@ -1032,14 +1029,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1032 return ret; 1029 return ret;
1033} 1030}
1034 1031
1035int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) 1032int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
1036{ 1033{
1037 if (!kvm->arch.using_mmu_notifiers) 1034 if (!kvm->arch.using_mmu_notifiers)
1038 return 0; 1035 return 0;
1039 return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp); 1036 return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
1040} 1037}
1041 1038
1042void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) 1039void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
1043{ 1040{
1044 if (!kvm->arch.using_mmu_notifiers) 1041 if (!kvm->arch.using_mmu_notifiers)
1045 return; 1042 return;
@@ -1512,9 +1509,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1512 1509
1513 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | 1510 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
1514 (VRMA_VSID << SLB_VSID_SHIFT_1T); 1511 (VRMA_VSID << SLB_VSID_SHIFT_1T);
1515 lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; 1512 lpcr = senc << (LPCR_VRMASD_SH - 4);
1516 lpcr |= senc << (LPCR_VRMASD_SH - 4); 1513 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
1517 kvm->arch.lpcr = lpcr;
1518 rma_setup = 1; 1514 rma_setup = 1;
1519 } 1515 }
1520 ++i; 1516 ++i;
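The VRMA setup above now goes through kvmppc_update_lpcr() rather than poking kvm->arch.lpcr directly, so the VRMASD field is updated consistently wherever the LPCR is cached. The helper's body is not in this diff; its contract, roughly (locking and any per-vcore propagation omitted):

	/* assumed contract: replace only the bits selected by 'mask' */
	void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
				unsigned long mask)
	{
		kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | (lpcr & mask);
	}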
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 30c2f3b134c6..2c25f5412bdb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	/* Didn't find the liobn, punt it to userspace */
 	return H_TOO_HARD;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 360ce68c9809..99d40f8977e8 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -86,8 +86,8 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
 	return true;
 }
 
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			   unsigned int inst, int *advance)
+int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			      unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int rt = get_rt(inst);
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		vcpu->arch.mmu.tlbie(vcpu, addr, large);
 		break;
 	}
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
 	case OP_31_XOP_FAKE_SC1:
 	{
 		/* SC 1 papr hypercalls */
@@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
 			if ((r == -ENOENT) || (r == -EPERM)) {
-				struct kvmppc_book3s_shadow_vcpu *svcpu;
-
-				svcpu = svcpu_get(vcpu);
 				*advance = 0;
 				vcpu->arch.shared->dar = vaddr;
-				svcpu->fault_dar = vaddr;
+				vcpu->arch.fault_dar = vaddr;
 
 				dsisr = DSISR_ISSTORE;
 				if (r == -ENOENT)
@@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 					dsisr |= DSISR_PROTFAULT;
 
 				vcpu->arch.shared->dsisr = dsisr;
-				svcpu->fault_dsisr = dsisr;
-				svcpu_put(svcpu);
+				vcpu->arch.fault_dsisr = dsisr;
 
 				kvmppc_book3s_queue_irqprio(vcpu,
 					BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -349,7 +345,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
 	return bat;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
 
@@ -472,7 +468,7 @@ unprivileged:
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 7057a02f0906..852989a9bad3 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,9 +20,10 @@
 #include <linux/export.h>
 #include <asm/kvm_book3s.h>
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
-#else
+#endif
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
 EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 62a2b5ab08ed..072287f1c3bc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -52,6 +52,9 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/hugetlb.h>
+#include <linux/module.h>
+
+#include "book3s.h"
 
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
@@ -66,7 +69,7 @@
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
-void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
 	int me;
 	int cpu = vcpu->cpu;
@@ -125,7 +128,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
  * purely defensive; they should never fail.)
  */
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
@@ -143,7 +146,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	spin_unlock(&vcpu->arch.tbacct_lock);
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
@@ -155,17 +158,46 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->arch.tbacct_lock);
 }
 
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
 	vcpu->arch.shregs.msr = msr;
 	kvmppc_end_cede(vcpu);
 }
 
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	vcpu->arch.pvr = pvr;
 }
 
+int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
+{
+	unsigned long pcr = 0;
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+	if (arch_compat) {
+		if (!cpu_has_feature(CPU_FTR_ARCH_206))
+			return -EINVAL;	/* 970 has no compat mode support */
+
+		switch (arch_compat) {
+		case PVR_ARCH_205:
+			pcr = PCR_ARCH_205;
+			break;
+		case PVR_ARCH_206:
+		case PVR_ARCH_206p:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	spin_lock(&vc->lock);
+	vc->arch_compat = arch_compat;
+	vc->pcr = pcr;
+	spin_unlock(&vc->lock);
+
+	return 0;
+}
+
 void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 {
 	int r;
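
kvmppc_set_arch_compat() above is reached from the ONE_REG interface (see the KVM_REG_PPC_ARCH_COMPAT cases later in this patch). A sketch of the userspace side, assuming an open vcpu fd and a uapi header that defines the new register ID; error handling is trimmed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_arch_compat(int vcpu_fd, uint32_t pvr_arch)
{
	uint32_t val = pvr_arch;		/* e.g. PVR_ARCH_205 */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_ARCH_COMPAT,
		.addr = (uintptr_t)&val,
	};

	/* -EINVAL comes back on a 970 host or an unknown PVR value */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
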
@@ -195,7 +227,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 		pr_err("  ESID = %.16llx VSID = %.16llx\n",
 		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
 	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
-	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
 	       vcpu->arch.last_inst);
 }
 
@@ -489,7 +521,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	memset(dt, 0, sizeof(struct dtl_entry));
 	dt->dispatch_reason = 7;
 	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
-	dt->timebase = now;
+	dt->timebase = now + vc->tb_offset;
 	dt->enqueue_to_dispatch_time = stolen;
 	dt->srr0 = kvmppc_get_pc(vcpu);
 	dt->srr1 = vcpu->arch.shregs.msr;
@@ -538,6 +570,15 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		}
 		break;
 	case H_CONFER:
+		target = kvmppc_get_gpr(vcpu, 4);
+		if (target == -1)
+			break;
+		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+		if (!tvcpu) {
+			ret = H_PARAMETER;
+			break;
+		}
+		kvm_vcpu_yield_to(tvcpu);
 		break;
 	case H_REGISTER_VPA:
 		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
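
A toy model of the H_CONFER dispatch added above, runnable in userspace: -1 means "confer to any vcpu", an unknown target fails with H_PARAMETER, anything else is yielded to. The vcpu table and printf stand in for kvmppc_find_vcpu() and kvm_vcpu_yield_to(); H_PARAMETER's value is assumed from hvcall.h:

#include <stdio.h>

#define H_SUCCESS	0
#define H_PARAMETER	-4			/* assumed value for the sketch */
#define NR_VCPUS	4

static int online[NR_VCPUS] = { 1, 1, 0, 1 };	/* toy vcpu table */

static long h_confer(long target)
{
	if (target == -1)
		return H_SUCCESS;		/* nothing specific to boost */
	if (target < 0 || target >= NR_VCPUS || !online[target])
		return H_PARAMETER;
	printf("yielding to vcpu %ld\n", target);
	return H_SUCCESS;
}

int main(void)
{
	h_confer(-1);
	h_confer(3);
	printf("%ld\n", h_confer(7));		/* H_PARAMETER */
	return 0;
}
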
@@ -576,8 +617,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
-static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			      struct task_struct *tsk)
+static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				 struct task_struct *tsk)
 {
 	int r = RESUME_HOST;
 
@@ -671,16 +712,16 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
 			vcpu->arch.shregs.msr);
+		run->hw.hardware_exit_reason = vcpu->arch.trap;
 		r = RESUME_HOST;
-		BUG();
 		break;
 	}
 
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	int i;
 
@@ -694,12 +735,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	int i, j;
 
-	kvmppc_set_pvr(vcpu, sregs->pvr);
+	kvmppc_set_pvr_hv(vcpu, sregs->pvr);
 
 	j = 0;
 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -714,7 +755,23 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	u64 mask;
+
+	spin_lock(&vc->lock);
+	/*
+	 * Userspace can only modify DPFD (default prefetch depth),
+	 * ILE (interrupt little-endian) and TC (translation control).
+	 */
+	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+	spin_unlock(&vc->lock);
+}
+
+static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 	long int i;
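
kvmppc_set_lpcr() above silently masks the written value down to DPFD, ILE and TC. A hedged userspace sketch of driving it through the new KVM_REG_PPC_LPCR register (vcpu_fd assumed open; the ILE bit position is taken from the kernel headers and passed in rather than hard-coded):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int toggle_guest_ile(int vcpu_fd, uint64_t lpcr_ile_bit)
{
	uint64_t lpcr;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,
		.addr = (uintptr_t)&lpcr,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	lpcr ^= lpcr_ile_bit;		/* bits outside DPFD/ILE/TC are ignored */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
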
@@ -749,6 +806,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SIAR:
+		*val = get_reg_val(id, vcpu->arch.siar);
+		break;
+	case KVM_REG_PPC_SDAR:
+		*val = get_reg_val(id, vcpu->arch.sdar);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -787,6 +850,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		val->vpaval.length = vcpu->arch.dtl.len;
 		spin_unlock(&vcpu->arch.vpa_update_lock);
 		break;
+	case KVM_REG_PPC_TB_OFFSET:
+		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+		break;
+	case KVM_REG_PPC_LPCR:
+		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+		break;
+	case KVM_REG_PPC_PPR:
+		*val = get_reg_val(id, vcpu->arch.ppr);
+		break;
+	case KVM_REG_PPC_ARCH_COMPAT:
+		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -795,7 +870,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 	long int i;
@@ -833,6 +909,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SIAR:
+		vcpu->arch.siar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SDAR:
+		vcpu->arch.sdar = set_reg_val(id, *val);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -880,6 +962,20 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		len -= len % sizeof(struct dtl_entry);
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
+	case KVM_REG_PPC_TB_OFFSET:
+		/* round up to multiple of 2^24 */
+		vcpu->arch.vcore->tb_offset =
+			ALIGN(set_reg_val(id, *val), 1UL << 24);
+		break;
+	case KVM_REG_PPC_LPCR:
+		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
+		break;
+	case KVM_REG_PPC_PPR:
+		vcpu->arch.ppr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_ARCH_COMPAT:
+		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
+		break;
 	default:
 		r = -EINVAL;
 		break;
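
The KVM_REG_PPC_TB_OFFSET case rounds the offset to a multiple of 2^24 because the timebase is adjusted via SPRN_TBU40, which can only write the upper 40 bits of the 64-bit register. A small stand-alone model of the rounding (ALIGN_UP mirrors the kernel's ALIGN macro):

#include <stdio.h>
#include <stdint.h>

/* round x up to a multiple of power-of-two a, like the kernel's ALIGN() */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t requested = 123456789ULL;
	uint64_t applied = ALIGN_UP(requested, 1ULL << 24);

	printf("requested %llu -> applied %llu (multiple of 2^24: %d)\n",
	       (unsigned long long)requested,
	       (unsigned long long)applied,
	       applied % (1ULL << 24) == 0);
	return 0;
}
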
@@ -888,14 +984,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_core_check_processor_compat(void)
-{
-	if (cpu_has_feature(CPU_FTR_HVMODE))
-		return 0;
-	return -EIO;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
+						   unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
 	int err = -EINVAL;
@@ -919,8 +1009,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.mmcr[0] = MMCR0_FC;
 	vcpu->arch.ctrl = CTRL_RUNLATCH;
 	/* default to host PVR, since we can't spoof it */
-	vcpu->arch.pvr = mfspr(SPRN_PVR);
-	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
@@ -940,6 +1029,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 		spin_lock_init(&vcore->lock);
 		init_waitqueue_head(&vcore->wq);
 		vcore->preempt_tb = TB_NIL;
+		vcore->lpcr = kvm->arch.lpcr;
 	}
 	kvm->arch.vcores[core] = vcore;
 	kvm->arch.online_vcores++;
@@ -972,7 +1062,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
 		 vpa->dirty);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
@@ -983,6 +1073,12 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
+static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
+{
+	/* Indicate we want to get back into the guest */
+	return 1;
+}
+
 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 {
 	unsigned long dec_nsec, now;
@@ -1264,8 +1360,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	ret = RESUME_GUEST;
 	if (vcpu->arch.trap)
-		ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
-					 vcpu->arch.run_task);
+		ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+					    vcpu->arch.run_task);
 
 	vcpu->arch.ret = ret;
 	vcpu->arch.trap = 0;
@@ -1424,7 +1520,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return vcpu->arch.ret;
 }
 
-int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
@@ -1546,7 +1642,8 @@ static const struct file_operations kvm_rma_fops = {
 	.release	= kvm_rma_release,
 };
 
-long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
+static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
+				      struct kvm_allocate_rma *ret)
 {
 	long fd;
 	struct kvm_rma_info *ri;
@@ -1592,7 +1689,8 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
 		(*sps)++;
 }
 
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
 {
 	struct kvm_ppc_one_seg_page_size *sps;
 
@@ -1613,7 +1711,8 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
+					 struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
 	int r;
@@ -1667,8 +1766,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot)
 	}
 }
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
-			      struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
+					struct kvm_memory_slot *dont)
 {
 	if (!dont || free->arch.rmap != dont->arch.rmap) {
 		vfree(free->arch.rmap);
@@ -1681,8 +1780,8 @@ void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
 	}
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
-			       unsigned long npages)
+static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
+					 unsigned long npages)
 {
 	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
 	if (!slot->arch.rmap)
@@ -1692,9 +1791,9 @@ int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
 	return 0;
 }
 
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
-				      struct kvm_userspace_memory_region *mem)
+static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
+					struct kvm_memory_slot *memslot,
+					struct kvm_userspace_memory_region *mem)
 {
 	unsigned long *phys;
 
@@ -1710,9 +1809,9 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 	return 0;
 }
 
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem,
-				      const struct kvm_memory_slot *old)
+static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
 {
 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot;
@@ -1729,6 +1828,37 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 	}
 }
 
+/*
+ * Update LPCR values in kvm->arch and in vcores.
+ * Caller must hold kvm->lock.
+ */
+void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
+{
+	long int i;
+	u32 cores_done = 0;
+
+	if ((kvm->arch.lpcr & mask) == lpcr)
+		return;
+
+	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
+
+	for (i = 0; i < KVM_MAX_VCORES; ++i) {
+		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
+		if (!vc)
+			continue;
+		spin_lock(&vc->lock);
+		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
+		spin_unlock(&vc->lock);
+		if (++cores_done >= kvm->arch.online_vcores)
+			break;
+	}
+}
+
+static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
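
kvmppc_update_lpcr() above is a masked broadcast: update the VM-wide value, then fold the same bits into every live vcore under its lock, bailing out early when nothing changes. A runnable userland model of the pattern (locks elided; the toy vcore table is an assumption of the sketch):

#include <stdio.h>

#define NCORES 4

struct vcore { unsigned long lpcr; int present; };
static struct vcore cores[NCORES] = { {0x80, 1}, {0x80, 1}, {0, 0}, {0x80, 1} };
static unsigned long vm_lpcr = 0x80;

static void update_lpcr(unsigned long lpcr, unsigned long mask)
{
	int i;

	if ((vm_lpcr & mask) == lpcr)
		return;				/* no change under this mask */
	vm_lpcr = (vm_lpcr & ~mask) | lpcr;
	for (i = 0; i < NCORES; i++)
		if (cores[i].present)
			cores[i].lpcr = (cores[i].lpcr & ~mask) | lpcr;
}

int main(void)
{
	update_lpcr(0x40, 0xc0);	/* clear bit 7, set bit 6 */
	printf("%lx %lx\n", vm_lpcr, cores[0].lpcr);	/* 40 40 */
	return 0;
}
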
@@ -1737,7 +1867,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	unsigned long hva;
 	struct kvm_memory_slot *memslot;
 	struct vm_area_struct *vma;
-	unsigned long lpcr, senc;
+	unsigned long lpcr = 0, senc;
+	unsigned long lpcr_mask = 0;
 	unsigned long psize, porder;
 	unsigned long rma_size;
 	unsigned long rmls;
@@ -1802,9 +1933,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		senc = slb_pgsize_encoding(psize);
 		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 			(VRMA_VSID << SLB_VSID_SHIFT_1T);
-		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-		lpcr |= senc << (LPCR_VRMASD_SH - 4);
-		kvm->arch.lpcr = lpcr;
+		lpcr_mask = LPCR_VRMASD;
+		/* the -4 is to account for senc values starting at 0x10 */
+		lpcr = senc << (LPCR_VRMASD_SH - 4);
 
 		/* Create HPTEs in the hash page table for the VRMA */
 		kvmppc_map_vrma(vcpu, memslot, porder);
@@ -1825,23 +1956,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	kvm->arch.rma = ri;
 
 	/* Update LPCR and RMOR */
-	lpcr = kvm->arch.lpcr;
 	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
 		/* PPC970; insert RMLS value (split field) in HID4 */
-		lpcr &= ~((1ul << HID4_RMLS0_SH) |
-			  (3ul << HID4_RMLS2_SH));
-		lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+		lpcr_mask = (1ul << HID4_RMLS0_SH) |
+			(3ul << HID4_RMLS2_SH) | HID4_RMOR;
+		lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
 			((rmls & 3) << HID4_RMLS2_SH);
 		/* RMOR is also in HID4 */
 		lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
 			<< HID4_RMOR_SH;
 	} else {
 		/* POWER7 */
-		lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
-		lpcr |= rmls << LPCR_RMLS_SH;
+		lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
+		lpcr = rmls << LPCR_RMLS_SH;
 		kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
 	}
-	kvm->arch.lpcr = lpcr;
 	pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
 		ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
 
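
On PPC970 the RMLS value is a split field in HID4, which is why the hunk above shifts rmls>>2 and rmls&3 to two different bit positions. A stand-alone model of that insertion; the shift constants are assumptions taken from reg.h:

#include <stdio.h>

#define HID4_RMLS0_SH	(63 - 40)	/* assumed, from reg.h: top bit of RMLS */
#define HID4_RMLS2_SH	(63 - 61)	/* assumed, from reg.h: low two bits */

static unsigned long hid4_rmls_bits(unsigned long rmls)
{
	return ((rmls >> 2) << HID4_RMLS0_SH) |
	       ((rmls & 3) << HID4_RMLS2_SH);
}

int main(void)
{
	unsigned long rmls;

	for (rmls = 0; rmls < 8; rmls++)
		printf("rmls=%lu -> hid4 bits %#lx\n", rmls, hid4_rmls_bits(rmls));
	return 0;
}
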
@@ -1860,6 +1989,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
 	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
 	smp_wmb();
 	kvm->arch.rma_setup_done = 1;
@@ -1875,7 +2006,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	goto out_srcu;
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 {
 	unsigned long lpcr, lpid;
 
@@ -1893,9 +2024,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	 */
 	cpumask_setall(&kvm->arch.need_tlb_flush);
 
-	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
-	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-
 	kvm->arch.rma = NULL;
 
 	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -1931,61 +2059,162 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_free_vcores(struct kvm *kvm)
+{
+	long int i;
+
+	for (i = 0; i < KVM_MAX_VCORES; ++i)
+		kfree(kvm->arch.vcores[i]);
+	kvm->arch.online_vcores = 0;
+}
+
+static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 {
 	uninhibit_secondary_onlining();
 
+	kvmppc_free_vcores(kvm);
 	if (kvm->arch.rma) {
 		kvm_release_rma(kvm->arch.rma);
 		kvm->arch.rma = NULL;
 	}
 
-	kvmppc_rtas_tokens_free(kvm);
-
 	kvmppc_free_hpt(kvm);
-	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
 }
 
-/* These are stubs for now */
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-}
-
 /* We don't need to emulate any privileged instructions or dcbz */
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			   unsigned int inst, int *advance)
+static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				     unsigned int inst, int *advance)
 {
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
+					ulong spr_val)
 {
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
+					ulong *spr_val)
 {
 	return EMULATE_FAIL;
 }
 
-static int kvmppc_book3s_hv_init(void)
+static int kvmppc_core_check_processor_compat_hv(void)
 {
-	int r;
+	if (!cpu_has_feature(CPU_FTR_HVMODE))
+		return -EIO;
+	return 0;
+}
 
-	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+static long kvm_arch_vm_ioctl_hv(struct file *filp,
+				 unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm __maybe_unused = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	long r;
 
-	if (r)
-		return r;
+	switch (ioctl) {
+
+	case KVM_ALLOCATE_RMA: {
+		struct kvm_allocate_rma rma;
+		struct kvm *kvm = filp->private_data;
+
+		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+			r = -EFAULT;
+		break;
+	}
+
+	case KVM_PPC_ALLOCATE_HTAB: {
+		u32 htab_order;
+
+		r = -EFAULT;
+		if (get_user(htab_order, (u32 __user *)argp))
+			break;
+		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
+		if (r)
+			break;
+		r = -EFAULT;
+		if (put_user(htab_order, (u32 __user *)argp))
+			break;
+		r = 0;
+		break;
+	}
+
+	case KVM_PPC_GET_HTAB_FD: {
+		struct kvm_get_htab_fd ghf;
+
+		r = -EFAULT;
+		if (copy_from_user(&ghf, argp, sizeof(ghf)))
+			break;
+		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
+		break;
+	}
+
+	default:
+		r = -ENOTTY;
+	}
+
+	return r;
+}
+
+static struct kvmppc_ops kvm_ops_hv = {
+	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
+	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
+	.get_one_reg = kvmppc_get_one_reg_hv,
+	.set_one_reg = kvmppc_set_one_reg_hv,
+	.vcpu_load = kvmppc_core_vcpu_load_hv,
+	.vcpu_put = kvmppc_core_vcpu_put_hv,
+	.set_msr = kvmppc_set_msr_hv,
+	.vcpu_run = kvmppc_vcpu_run_hv,
+	.vcpu_create = kvmppc_core_vcpu_create_hv,
+	.vcpu_free = kvmppc_core_vcpu_free_hv,
+	.check_requests = kvmppc_core_check_requests_hv,
+	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
+	.flush_memslot = kvmppc_core_flush_memslot_hv,
+	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
+	.commit_memory_region = kvmppc_core_commit_memory_region_hv,
+	.unmap_hva = kvm_unmap_hva_hv,
+	.unmap_hva_range = kvm_unmap_hva_range_hv,
+	.age_hva = kvm_age_hva_hv,
+	.test_age_hva = kvm_test_age_hva_hv,
+	.set_spte_hva = kvm_set_spte_hva_hv,
+	.mmu_destroy = kvmppc_mmu_destroy_hv,
+	.free_memslot = kvmppc_core_free_memslot_hv,
+	.create_memslot = kvmppc_core_create_memslot_hv,
+	.init_vm = kvmppc_core_init_vm_hv,
+	.destroy_vm = kvmppc_core_destroy_vm_hv,
+	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
+	.emulate_op = kvmppc_core_emulate_op_hv,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
+	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
+	.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
+};
+
+static int kvmppc_book3s_init_hv(void)
+{
+	int r;
+	/*
+	 * FIXME!! Do we need to check on all cpus ?
+	 */
+	r = kvmppc_core_check_processor_compat_hv();
+	if (r < 0)
+		return r;
 
-	r = kvmppc_mmu_hv_init();
+	kvm_ops_hv.owner = THIS_MODULE;
+	kvmppc_hv_ops = &kvm_ops_hv;
 
+	r = kvmppc_mmu_hv_init();
 	return r;
 }
 
-static void kvmppc_book3s_hv_exit(void)
+static void kvmppc_book3s_exit_hv(void)
 {
-	kvm_exit();
+	kvmppc_hv_ops = NULL;
 }
 
-module_init(kvmppc_book3s_hv_init);
-module_exit(kvmppc_book3s_hv_exit);
+module_init(kvmppc_book3s_init_hv);
+module_exit(kvmppc_book3s_exit_hv);
+MODULE_LICENSE("GPL");
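
The kvm_ops_hv table is the point of this refactor: generic PowerPC KVM code now calls through a pointer installed at module init instead of linking directly against HV symbols. A runnable userland model of the indirection (the struct and wrapper names are illustrative; only kvmppc_hv_ops and kvm_ops_hv are real):

#include <stdio.h>

struct ops {
	int (*vcpu_run)(int vcpu_id);
};

static int vcpu_run_hv(int vcpu_id)
{
	printf("HV run for vcpu %d\n", vcpu_id);
	return 0;
}

static struct ops ops_hv = { .vcpu_run = vcpu_run_hv };
static struct ops *active_ops;		/* plays the role of kvmppc_hv_ops */

int main(void)
{
	active_ops = &ops_hv;		/* what kvmppc_book3s_init_hv() does */
	return active_ops->vcpu_run(0);	/* generic code never names HV */
}
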
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 37f1cc417ca0..928142c64cb0 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -158,9 +158,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  * Interrupts are enabled again at this point.
  */
 
-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
 	/*
 	 * Register usage at this point:
 	 *
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c71103b8a748..bc8de75b1925 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,30 +33,6 @@
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
 #endif
 
-/*****************************************************************************
- *                                                                           *
- *        Real Mode handlers that need to be in the linear mapping           *
- *                                                                           *
- ****************************************************************************/
-
-	.globl	kvmppc_skip_interrupt
-kvmppc_skip_interrupt:
-	mfspr	r13,SPRN_SRR0
-	addi	r13,r13,4
-	mtspr	SPRN_SRR0,r13
-	GET_SCRATCH0(r13)
-	rfid
-	b	.
-
-	.globl	kvmppc_skip_Hinterrupt
-kvmppc_skip_Hinterrupt:
-	mfspr	r13,SPRN_HSRR0
-	addi	r13,r13,4
-	mtspr	SPRN_HSRR0,r13
-	GET_SCRATCH0(r13)
-	hrfid
-	b	.
-
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -66,8 +42,11 @@ kvmppc_skip_Hinterrupt:
  * LR = return address to continue at after eventually re-enabling MMU
  */
 _GLOBAL(kvmppc_hv_entry_trampoline)
+	mflr	r0
+	std	r0, PPC_LR_STKOFF(r1)
+	stdu	r1, -112(r1)
 	mfmsr	r10
-	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
+	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
 	li	r0,MSR_RI
 	andc	r0,r10,r0
 	li	r6,MSR_IR | MSR_DR
@@ -77,11 +56,103 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
 	mtsrr1	r6
 	RFI
 
-/******************************************************************************
- *                                                                            *
- *                               Entry code                                   *
- *                                                                            *
- *****************************************************************************/
+kvmppc_call_hv_entry:
+	bl	kvmppc_hv_entry
+
+	/* Back from guest - restore host state and return to caller */
+
+	/* Restore host DABR and DABRX */
+	ld	r5,HSTATE_DABR(r13)
+	li	r6,7
+	mtspr	SPRN_DABR,r5
+	mtspr	SPRN_DABRX,r6
+
+	/* Restore SPRG3 */
+	ld	r3,PACA_SPRG3(r13)
+	mtspr	SPRN_SPRG3,r3
+
+	/*
+	 * Reload DEC.  HDEC interrupts were disabled when
+	 * we reloaded the host's LPCR value.
+	 */
+	ld	r3, HSTATE_DECEXP(r13)
+	mftb	r4
+	subf	r4, r4, r3
+	mtspr	SPRN_DEC, r4
+
+	/* Reload the host's PMU registers */
+	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
+	lbz	r4, LPPACA_PMCINUSE(r3)
+	cmpwi	r4, 0
+	beq	23f			/* skip if not */
+	lwz	r3, HSTATE_PMC(r13)
+	lwz	r4, HSTATE_PMC + 4(r13)
+	lwz	r5, HSTATE_PMC + 8(r13)
+	lwz	r6, HSTATE_PMC + 12(r13)
+	lwz	r8, HSTATE_PMC + 16(r13)
+	lwz	r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+	lwz	r10, HSTATE_PMC + 24(r13)
+	lwz	r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	mtspr	SPRN_PMC1, r3
+	mtspr	SPRN_PMC2, r4
+	mtspr	SPRN_PMC3, r5
+	mtspr	SPRN_PMC4, r6
+	mtspr	SPRN_PMC5, r8
+	mtspr	SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PMC7, r10
+	mtspr	SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	ld	r3, HSTATE_MMCR(r13)
+	ld	r4, HSTATE_MMCR + 8(r13)
+	ld	r5, HSTATE_MMCR + 16(r13)
+	mtspr	SPRN_MMCR1, r4
+	mtspr	SPRN_MMCRA, r5
+	mtspr	SPRN_MMCR0, r3
+	isync
+23:
+
+	/*
+	 * For external and machine check interrupts, we need
+	 * to call the Linux handler to process the interrupt.
+	 * We do that by jumping to absolute address 0x500 for
+	 * external interrupts, or the machine_check_fwnmi label
+	 * for machine checks (since firmware might have patched
+	 * the vector area at 0x200).  The [h]rfid at the end of the
+	 * handler will return to the book3s_hv_interrupts.S code.
+	 * For other interrupts we do the rfid to get back
+	 * to the book3s_hv_interrupts.S code here.
+	 */
+	ld	r8, 112+PPC_LR_STKOFF(r1)
+	addi	r1, r1, 112
+	ld	r7, HSTATE_HOST_MSR(r13)
+
+	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+BEGIN_FTR_SECTION
+	beq	11f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+	/* RFI into the highmem handler, or branch to interrupt handler */
+	mfmsr	r6
+	li	r0, MSR_RI
+	andc	r6, r6, r0
+	mtmsrd	r6, 1			/* Clear RI in MSR */
+	mtsrr0	r8
+	mtsrr1	r7
+	beqa	0x500			/* external interrupt (PPC970) */
+	beq	cr1, 13f		/* machine check */
+	RFI
+
+	/* On POWER7, we have external interrupts set to use HSRR0/1 */
+11:	mtspr	SPRN_HSRR0, r8
+	mtspr	SPRN_HSRR1, r7
+	ba	0x500
+
+13:	b	machine_check_fwnmi
+
 
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
@@ -137,7 +208,7 @@ kvm_start_guest:
 	cmpdi	r4,0
 	/* if we have no vcpu to run, go back to sleep */
 	beq	kvm_no_guest
-	b	kvmppc_hv_entry
+	b	30f
 
 27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
 	b	kvm_no_guest
@@ -147,6 +218,57 @@ kvm_start_guest:
 	stw	r8,HSTATE_SAVED_XIRR(r13)
 	b	kvm_no_guest
 
+30:	bl	kvmppc_hv_entry
+
+	/* Back from the guest, go back to nap */
+	/* Clear our vcpu pointer so we don't come back in early */
+	li	r0, 0
+	std	r0, HSTATE_KVM_VCPU(r13)
+	lwsync
+	/* Clear any pending IPI - we're an offline thread */
+	ld	r5, HSTATE_XICS_PHYS(r13)
+	li	r7, XICS_XIRR
+	lwzcix	r3, r5, r7		/* ack any pending interrupt */
+	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
+	beq	37f
+	sync
+	li	r0, 0xff
+	li	r6, XICS_MFRR
+	stbcix	r0, r5, r6		/* clear the IPI */
+	stwcix	r3, r5, r7		/* EOI it */
+37:	sync
+
+	/* increment the nap count and then go to nap mode */
+	ld	r4, HSTATE_KVM_VCORE(r13)
+	addi	r4, r4, VCORE_NAP_COUNT
+	lwsync				/* make previous updates visible */
+51:	lwarx	r3, 0, r4
+	addi	r3, r3, 1
+	stwcx.	r3, 0, r4
+	bne	51b
+
+kvm_no_guest:
+	li	r0, KVM_HWTHREAD_IN_NAP
+	stb	r0, HSTATE_HWTHREAD_STATE(r13)
+	li	r3, LPCR_PECE0
+	mfspr	r4, SPRN_LPCR
+	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
+	mtspr	SPRN_LPCR, r4
+	isync
+	std	r0, HSTATE_SCRATCH0(r13)
+	ptesync
+	ld	r0, HSTATE_SCRATCH0(r13)
+1:	cmpd	r0, r0
+	bne	1b
+	nap
+	b	.
+
+/******************************************************************************
+ *                                                                            *
+ *                               Entry code                                   *
+ *                                                                            *
+ *****************************************************************************/
+
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
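
The lwarx/stwcx. loop at label 51 above bumps the vcore nap count atomically, with the preceding lwsync providing release ordering. In C11 the same pattern collapses to a single fetch-add; a minimal model:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int nap_count;

/* release ordering stands in for the lwsync before the lwarx loop */
static void increment_nap_count(void)
{
	atomic_fetch_add_explicit(&nap_count, 1, memory_order_release);
}

int main(void)
{
	increment_nap_count();
	printf("nap_count = %d\n", atomic_load(&nap_count));
	return 0;
}
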
@@ -159,7 +281,8 @@ kvmppc_hv_entry:
 	 * all other volatile GPRS = free
 	 */
 	mflr	r0
-	std	r0, HSTATE_VMHANDLER(r13)
+	std	r0, PPC_LR_STKOFF(r1)
+	stdu	r1, -112(r1)
 
 	/* Set partition DABR */
 	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -200,8 +323,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, VCPU_MMCR(r4)
 	ld	r5, VCPU_MMCR + 8(r4)
 	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
 	mtspr	SPRN_MMCR1, r5
 	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -254,22 +381,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
 
-	/* Increment yield count if they have a VPA */
-	ld	r3, VCPU_VPA(r4)
-	cmpdi	r3, 0
-	beq	25f
-	lwz	r5, LPPACA_YIELDCOUNT(r3)
-	addi	r5, r5, 1
-	stw	r5, LPPACA_YIELDCOUNT(r3)
-	li	r6, 1
-	stb	r6, VCPU_VPA_DIRTY(r4)
-25:
 	/* Load up DAR and DSISR */
 	ld	r5, VCPU_DAR(r4)
 	lwz	r6, VCPU_DSISR(r4)
 	mtspr	SPRN_DAR, r5
 	mtspr	SPRN_DSISR, r6
 
+	li	r6, KVM_GUEST_MODE_HOST_HV
+	stb	r6, HSTATE_IN_GUEST(r13)
+
 BEGIN_FTR_SECTION
 	/* Restore AMR and UAMOR, set AMOR to all 1s */
 	ld	r5,VCPU_AMR(r4)
@@ -343,7 +463,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	bdnz	28b
 	ptesync
 
-22:	li	r0,1
+	/* Add timebase offset onto timebase */
+22:	ld	r8,VCORE_TB_OFFSET(r5)
+	cmpdi	r8,0
+	beq	37f
+	mftb	r6		/* current host timebase */
+	add	r8,r8,r6
+	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
+	mftb	r7		/* check if lower 24 bits overflowed */
+	clrldi	r6,r6,40
+	clrldi	r7,r7,40
+	cmpld	r7,r6
+	bge	37f
+	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
+	mtspr	SPRN_TBU40,r8
+
+	/* Load guest PCR value to select appropriate compat mode */
+37:	ld	r7, VCORE_PCR(r5)
+	cmpdi	r7, 0
+	beq	38f
+	mtspr	SPRN_PCR, r7
+38:
+	li	r0,1
 	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
 	b	10f
 
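
The timebase-offset sequence at label 22 has a subtle fix-up: mtspr SPRN_TBU40 writes only the upper 40 bits, so if the low 24 bits carried into bit 24 between the two mftb reads, the upper part must be bumped again (the addis ...,0x100). A stand-alone C model of that carry check; the two-tick advance simulates the timebase moving between reads:

#include <stdio.h>
#include <stdint.h>

/* TBU40 semantics: replace the upper 40 bits, keep the low 24 ticking */
static uint64_t write_tbu40(uint64_t tb, uint64_t val)
{
	return (val & ~0xffffffULL) | (tb & 0xffffffULL);
}

int main(void)
{
	uint64_t tb = 0x123456fffffeULL;	/* low 24 bits nearly full */
	uint64_t offset = 5ULL << 24;

	uint64_t t6 = tb;			/* first mftb */
	uint64_t target = t6 + offset;
	tb = write_tbu40(tb + 2, target);	/* tb ticked twice meanwhile */
	uint64_t t7 = tb;			/* second mftb */
	if ((t7 & 0xffffff) < (t6 & 0xffffff))	/* low bits wrapped? */
		tb = write_tbu40(tb, tb + (1ULL << 24));
	printf("final tb %#llx\n", (unsigned long long)tb);
	return 0;
}
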
@@ -353,12 +494,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	beq	20b
 
 	/* Set LPCR and RMOR. */
-10:	ld	r8,KVM_LPCR(r9)
+10:	ld	r8,VCORE_LPCR(r5)
 	mtspr	SPRN_LPCR,r8
 	ld	r8,KVM_RMOR(r9)
 	mtspr	SPRN_RMOR,r8
 	isync
 
+	/* Increment yield count if they have a VPA */
+	ld	r3, VCPU_VPA(r4)
+	cmpdi	r3, 0
+	beq	25f
+	lwz	r5, LPPACA_YIELDCOUNT(r3)
+	addi	r5, r5, 1
+	stw	r5, LPPACA_YIELDCOUNT(r3)
+	li	r6, 1
+	stb	r6, VCPU_VPA_DIRTY(r4)
+25:
 	/* Check if HDEC expires soon */
 	mfspr	r3,SPRN_HDEC
 	cmpwi	r3,10
@@ -405,7 +556,8 @@ toc_tlbie_lock:
 	bne	24b
 	isync
 
-	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
 	li	r0,0x18f
 	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
 	or	r0,r7,r0
@@ -541,7 +693,7 @@ fast_guest_return:
 	mtspr	SPRN_HSRR1,r11
 
 	/* Activate guest mode, so faults get handled by KVM */
-	li	r9, KVM_GUEST_MODE_GUEST
+	li	r9, KVM_GUEST_MODE_GUEST_HV
 	stb	r9, HSTATE_IN_GUEST(r13)
 
 	/* Enter guest */
@@ -550,13 +702,15 @@ BEGIN_FTR_SECTION
 	ld	r5, VCPU_CFAR(r4)
 	mtspr	SPRN_CFAR, r5
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+	ld	r0, VCPU_PPR(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	ld	r5, VCPU_LR(r4)
 	lwz	r6, VCPU_CR(r4)
 	mtlr	r5
 	mtcr	r6
 
-	ld	r0, VCPU_GPR(R0)(r4)
 	ld	r1, VCPU_GPR(R1)(r4)
 	ld	r2, VCPU_GPR(R2)(r4)
 	ld	r3, VCPU_GPR(R3)(r4)
@@ -570,6 +724,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	ld	r12, VCPU_GPR(R12)(r4)
 	ld	r13, VCPU_GPR(R13)(r4)
 
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PPR, r0
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+	ld	r0, VCPU_GPR(R0)(r4)
 	ld	r4, VCPU_GPR(R4)(r4)
 
 	hrfid
@@ -584,8 +742,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 /*
  * We come here from the first-level interrupt handlers.
  */
-	.globl	kvmppc_interrupt
-kvmppc_interrupt:
+	.globl	kvmppc_interrupt_hv
+kvmppc_interrupt_hv:
 	/*
 	 * Register contents:
 	 *   R12		= interrupt vector
@@ -595,6 +753,19 @@ kvmppc_interrupt:
 	 */
 	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
 	std	r9, HSTATE_HOST_R2(r13)
+
+	lbz	r9, HSTATE_IN_GUEST(r13)
+	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
+	beq	kvmppc_bad_host_intr
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	cmpwi	r9, KVM_GUEST_MODE_GUEST
+	ld	r9, HSTATE_HOST_R2(r13)
+	beq	kvmppc_interrupt_pr
+#endif
+	/* We're now back in the host but in guest MMU context */
+	li	r9, KVM_GUEST_MODE_HOST_HV
+	stb	r9, HSTATE_IN_GUEST(r13)
+
 	ld	r9, HSTATE_KVM_VCPU(r13)
 
 	/* Save registers */
@@ -620,6 +791,10 @@ BEGIN_FTR_SECTION
 	ld	r3, HSTATE_CFAR(r13)
 	std	r3, VCPU_CFAR(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+	ld	r4, HSTATE_PPR(r13)
+	std	r4, VCPU_PPR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	/* Restore R1/R2 so we can handle faults */
 	ld	r1, HSTATE_HOST_R1(r13)
@@ -642,10 +817,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	std	r3, VCPU_GPR(R13)(r9)
 	std	r4, VCPU_LR(r9)
 
-	/* Unset guest mode */
-	li	r0, KVM_GUEST_MODE_NONE
-	stb	r0, HSTATE_IN_GUEST(r13)
-
 	stw	r12,VCPU_TRAP(r9)
 
 	/* Save HEIR (HV emulation assist reg) in last_inst
@@ -696,46 +867,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	 * set, we know the host wants us out so let's do it now
 	 */
 do_ext_interrupt:
-	lbz	r0, HSTATE_HOST_IPI(r13)
-	cmpwi	r0, 0
-	bne	ext_interrupt_to_host
-
-	/* Now read the interrupt from the ICP */
-	ld	r5, HSTATE_XICS_PHYS(r13)
-	li	r7, XICS_XIRR
-	cmpdi	r5, 0
-	beq-	ext_interrupt_to_host
-	lwzcix	r3, r5, r7
-	rlwinm.	r0, r3, 0, 0xffffff
-	sync
-	beq	3f		/* if nothing pending in the ICP */
-
-	/* We found something in the ICP...
-	 *
-	 * If it's not an IPI, stash it in the PACA and return to
-	 * the host, we don't (yet) handle directing real external
-	 * interrupts directly to the guest
-	 */
-	cmpwi	r0, XICS_IPI
-	bne	ext_stash_for_host
-
-	/* It's an IPI, clear the MFRR and EOI it */
-	li	r0, 0xff
-	li	r6, XICS_MFRR
-	stbcix	r0, r5, r6		/* clear the IPI */
-	stwcix	r3, r5, r7		/* EOI it */
-	sync
-
-	/* We need to re-check host IPI now in case it got set in the
-	 * meantime. If it's clear, we bounce the interrupt to the
-	 * guest
-	 */
-	lbz	r0, HSTATE_HOST_IPI(r13)
-	cmpwi	r0, 0
-	bne-	1f
+	bl	kvmppc_read_intr
+	cmpdi	r3, 0
+	bgt	ext_interrupt_to_host
 
 	/* Allright, looks like an IPI for the guest, we need to set MER */
-3:
 	/* Check if any CPU is heading out to the host, if so head out too */
 	ld	r5, HSTATE_KVM_VCORE(r13)
 	lwz	r0, VCORE_ENTRY_EXIT(r5)
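
do_ext_interrupt now delegates to kvmppc_read_intr, whose contract (spelled out where it is defined later in this patch) is: 0 = nothing pending, 1 = the host must handle it, -1 = a guest-wakeup IPI already consumed. The bgt above therefore exits to the host only for the positive case; a tiny C model of that routing:

#include <stdio.h>

/* 1: exit to host, 0: stay in guest (nothing pending or IPI consumed) */
static int route_external_interrupt(int read_intr_ret)
{
	return read_intr_ret > 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       route_external_interrupt(0),	/* stay */
	       route_external_interrupt(1),	/* to host */
	       route_external_interrupt(-1));	/* stay (IPI consumed) */
	return 0;
}
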
@@ -764,27 +900,9 @@ do_ext_interrupt:
 	mtspr	SPRN_LPCR, r8
 	b	fast_guest_return
 
-	/* We raced with the host, we need to resend that IPI, bummer */
-1:	li	r0, IPI_PRIORITY
-	stbcix	r0, r5, r6		/* set the IPI */
-	sync
-	b	ext_interrupt_to_host
-
-ext_stash_for_host:
-	/* It's not an IPI and it's for the host, stash it in the PACA
-	 * before exit, it will be picked up by the host ICP driver
-	 */
-	stw	r3, HSTATE_SAVED_XIRR(r13)
 ext_interrupt_to_host:
 
 guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
-	/* Save DEC */
-	mfspr	r5,SPRN_DEC
-	mftb	r6
-	extsw	r5,r5
-	add	r5,r5,r6
-	std	r5,VCPU_DEC_EXPIRES(r9)
-
 	/* Save more register state */
 	mfdar	r6
 	mfdsisr	r7
@@ -954,7 +1072,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
 	mtspr	SPRN_LPID,r7
 	isync
-	li	r0,0
+
+	/* Subtract timebase offset from timebase */
+	ld	r8,VCORE_TB_OFFSET(r5)
+	cmpdi	r8,0
+	beq	17f
+	mftb	r6			/* current host timebase */
+	subf	r8,r8,r6
+	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
+	mftb	r7			/* check if lower 24 bits overflowed */
+	clrldi	r6,r6,40
+	clrldi	r7,r7,40
+	cmpld	r7,r6
+	bge	17f
+	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
+	mtspr	SPRN_TBU40,r8
+
+	/* Reset PCR */
+17:	ld	r0, VCORE_PCR(r5)
+	cmpdi	r0, 0
+	beq	18f
+	li	r0, 0
+	mtspr	SPRN_PCR, r0
+18:
+	/* Signal secondary CPUs to continue */
 	stb	r0,VCORE_IN_GUEST(r5)
 	lis	r8,0x7fff		/* MAX_INT@h */
 	mtspr	SPRN_HDEC,r8
@@ -1052,6 +1193,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 1:	addi	r8,r8,16
 	.endr
 
+	/* Save DEC */
+	mfspr	r5,SPRN_DEC
+	mftb	r6
+	extsw	r5,r5
+	add	r5,r5,r6
+	std	r5,VCPU_DEC_EXPIRES(r9)
+
 	/* Save and reset AMR and UAMOR before turning on the MMU */
 BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_AMR
@@ -1062,6 +1210,10 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_AMR,r6
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
+	/* Unset guest mode */
+	li	r0, KVM_GUEST_MODE_NONE
+	stb	r0, HSTATE_IN_GUEST(r13)
+
 	/* Switch DSCR back to host value */
 BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
@@ -1134,9 +1286,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
 	b	22f
 21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r4, SPRN_PMC2
 	mfspr	r5, SPRN_PMC3
@@ -1158,103 +1314,30 @@ BEGIN_FTR_SECTION
 	stw	r11, VCPU_PMC + 28(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 22:
+	ld	r0, 112+PPC_LR_STKOFF(r1)
+	addi	r1, r1, 112
+	mtlr	r0
+	blr
+secondary_too_late:
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	HMT_LOW
+13:	lbz	r3,VCORE_IN_GUEST(r5)
+	cmpwi	r3,0
+	bne	13b
+	HMT_MEDIUM
+	li	r0, KVM_GUEST_MODE_NONE
+	stb	r0, HSTATE_IN_GUEST(r13)
+	ld	r11,PACA_SLBSHADOWPTR(r13)
 
-	/* Secondary threads go off to take a nap on POWER7 */
-BEGIN_FTR_SECTION
-	lwz	r0,VCPU_PTID(r9)
-	cmpwi	r0,0
-	bne	secondary_nap
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* Restore host DABR and DABRX */
-	ld	r5,HSTATE_DABR(r13)
-	li	r6,7
-	mtspr	SPRN_DABR,r5
-	mtspr	SPRN_DABRX,r6
-
-	/* Restore SPRG3 */
-	ld	r3,PACA_SPRG3(r13)
-	mtspr	SPRN_SPRG3,r3
-
-	/*
-	 * Reload DEC.  HDEC interrupts were disabled when
-	 * we reloaded the host's LPCR value.
-	 */
-	ld	r3, HSTATE_DECEXP(r13)
-	mftb	r4
-	subf	r4, r4, r3
-	mtspr	SPRN_DEC, r4
-
-	/* Reload the host's PMU registers */
-	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
-	lbz	r4, LPPACA_PMCINUSE(r3)
-	cmpwi	r4, 0
-	beq	23f			/* skip if not */
-	lwz	r3, HSTATE_PMC(r13)
-	lwz	r4, HSTATE_PMC + 4(r13)
-	lwz	r5, HSTATE_PMC + 8(r13)
-	lwz	r6, HSTATE_PMC + 12(r13)
-	lwz	r8, HSTATE_PMC + 16(r13)
-	lwz	r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
-	lwz	r10, HSTATE_PMC + 24(r13)
-	lwz	r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	mtspr	SPRN_PMC1, r3
-	mtspr	SPRN_PMC2, r4
-	mtspr	SPRN_PMC3, r5
-	mtspr	SPRN_PMC4, r6
-	mtspr	SPRN_PMC5, r8
-	mtspr	SPRN_PMC6, r9
-BEGIN_FTR_SECTION
-	mtspr	SPRN_PMC7, r10
-	mtspr	SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	ld	r3, HSTATE_MMCR(r13)
-	ld	r4, HSTATE_MMCR + 8(r13)
-	ld	r5, HSTATE_MMCR + 16(r13)
-	mtspr	SPRN_MMCR1, r4
-	mtspr	SPRN_MMCRA, r5
-	mtspr	SPRN_MMCR0, r3
-	isync
-23:
-	/*
-	 * For external and machine check interrupts, we need
-	 * to call the Linux handler to process the interrupt.
-	 * We do that by jumping to absolute address 0x500 for
-	 * external interrupts, or the machine_check_fwnmi label
-	 * for machine checks (since firmware might have patched
-	 * the vector area at 0x200).  The [h]rfid at the end of the
-	 * handler will return to the book3s_hv_interrupts.S code.
-	 * For other interrupts we do the rfid to get back
-	 * to the book3s_hv_interrupts.S code here.
-	 */
-	ld	r8, HSTATE_VMHANDLER(r13)
-	ld	r7, HSTATE_HOST_MSR(r13)
-
-	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
-	beq	11f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* RFI into the highmem handler, or branch to interrupt handler */
-	mfmsr	r6
-	li	r0, MSR_RI
-	andc	r6, r6, r0
-	mtmsrd	r6, 1			/* Clear RI in MSR */
-	mtsrr0	r8
-	mtsrr1	r7
-	beqa	0x500			/* external interrupt (PPC970) */
-	beq	cr1, 13f		/* machine check */
-	RFI
-
-	/* On POWER7, we have external interrupts set to use HSRR0/1 */
-11:	mtspr	SPRN_HSRR0, r8
-	mtspr	SPRN_HSRR1, r7
-	ba	0x500
-
-13:	b	machine_check_fwnmi
+	.rept	SLB_NUM_BOLTED
+	ld	r5,SLBSHADOW_SAVEAREA(r11)
+	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
+	andis.	r7,r5,SLB_ESID_V@h
+	beq	1f
+	slbmte	r6,r5
+1:	addi	r11,r11,16
+	.endr
+	b	22b
 
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
@@ -1333,7 +1416,7 @@ fast_interrupt_c_return:
1333 stw r8, VCPU_LAST_INST(r9) 1416 stw r8, VCPU_LAST_INST(r9)
1334 1417
1335 /* Unset guest mode. */ 1418 /* Unset guest mode. */
1336 li r0, KVM_GUEST_MODE_NONE 1419 li r0, KVM_GUEST_MODE_HOST_HV
1337 stb r0, HSTATE_IN_GUEST(r13) 1420 stb r0, HSTATE_IN_GUEST(r13)
1338 b guest_exit_cont 1421 b guest_exit_cont
1339 1422
@@ -1701,67 +1784,70 @@ machine_check_realmode:
1701 rotldi r11, r11, 63 1784 rotldi r11, r11, 63
1702 b fast_interrupt_c_return 1785 b fast_interrupt_c_return
1703 1786
1704secondary_too_late: 1787/*
1705 ld r5,HSTATE_KVM_VCORE(r13) 1788 * Determine what sort of external interrupt is pending (if any).
1706 HMT_LOW 1789 * Returns:
170713: lbz r3,VCORE_IN_GUEST(r5) 1790 * 0 if no interrupt is pending
1708 cmpwi r3,0 1791 * 1 if an interrupt is pending that needs to be handled by the host
1709 bne 13b 1792 * -1 if there was a guest wakeup IPI (which has now been cleared)
1710 HMT_MEDIUM 1793 */
1711 ld r11,PACA_SLBSHADOWPTR(r13) 1794kvmppc_read_intr:
1712 1795 /* see if a host IPI is pending */
1713 .rept SLB_NUM_BOLTED 1796 li r3, 1
1714 ld r5,SLBSHADOW_SAVEAREA(r11) 1797 lbz r0, HSTATE_HOST_IPI(r13)
1715 ld r6,SLBSHADOW_SAVEAREA+8(r11) 1798 cmpwi r0, 0
1716 andis. r7,r5,SLB_ESID_V@h 1799 bne 1f
1717 beq 1f
1718 slbmte r6,r5
17191: addi r11,r11,16
1720 .endr
1721 1800
1722secondary_nap: 1801 /* Now read the interrupt from the ICP */
1723 /* Clear our vcpu pointer so we don't come back in early */ 1802 ld r6, HSTATE_XICS_PHYS(r13)
1724 li r0, 0
1725 std r0, HSTATE_KVM_VCPU(r13)
1726 lwsync
1727 /* Clear any pending IPI - assume we're a secondary thread */
1728 ld r5, HSTATE_XICS_PHYS(r13)
1729 li r7, XICS_XIRR 1803 li r7, XICS_XIRR
1730 lwzcix r3, r5, r7 /* ack any pending interrupt */ 1804 cmpdi r6, 0
1731 rlwinm. r0, r3, 0, 0xffffff /* any pending? */ 1805 beq- 1f
1732 beq 37f 1806 lwzcix r0, r6, r7
1807 rlwinm. r3, r0, 0, 0xffffff
1733 sync 1808 sync
1734 li r0, 0xff 1809 beq 1f /* if nothing pending in the ICP */
1735 li r6, XICS_MFRR
1736 stbcix r0, r5, r6 /* clear the IPI */
1737 stwcix r3, r5, r7 /* EOI it */
173837: sync
1739 1810
1740 /* increment the nap count and then go to nap mode */ 1811 /* We found something in the ICP...
1741 ld r4, HSTATE_KVM_VCORE(r13) 1812 *
1742 addi r4, r4, VCORE_NAP_COUNT 1813 * If it's not an IPI, stash it in the PACA and return to
1743	lwsync		/* make previous updates visible */ 1814	 * the host; we don't (yet) handle directing real external
174451:	lwarx	r3, 0, r4 1815	 * interrupts to the guest
1745 addi r3, r3, 1 1816 */
1746 stwcx. r3, 0, r4 1817 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
1747 bne 51b 1818 li r3, 1
1819 bne 42f
1748 1820
1749kvm_no_guest: 1821 /* It's an IPI, clear the MFRR and EOI it */
1750 li r0, KVM_HWTHREAD_IN_NAP 1822 li r3, 0xff
1751 stb r0, HSTATE_HWTHREAD_STATE(r13) 1823 li r8, XICS_MFRR
1824 stbcix r3, r6, r8 /* clear the IPI */
1825 stwcix r0, r6, r7 /* EOI it */
1826 sync
1752 1827
1753 li r3, LPCR_PECE0 1828 /* We need to re-check host IPI now in case it got set in the
1754 mfspr r4, SPRN_LPCR 1829 * meantime. If it's clear, we bounce the interrupt to the
1755 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 1830 * guest
1756 mtspr SPRN_LPCR, r4 1831 */
1757 isync 1832 lbz r0, HSTATE_HOST_IPI(r13)
1758 std r0, HSTATE_SCRATCH0(r13) 1833 cmpwi r0, 0
1759 ptesync 1834 bne- 43f
1760 ld r0, HSTATE_SCRATCH0(r13) 1835
17611: cmpd r0, r0 1836 /* OK, it's an IPI for us */
1762 bne 1b 1837 li r3, -1
1763 nap 18381: blr
1764 b . 1839
184042:	/* It's not an IPI and it's for the host; stash it in the PACA
1841	 * before exit, where the host ICP driver will pick it up
1842	 */
1843 stw r0, HSTATE_SAVED_XIRR(r13)
1844 b 1b
1845
184643:	/* We raced with the host; we need to resend that IPI, bummer */
1847 li r0, IPI_PRIORITY
1848 stbcix r0, r6, r8 /* set the IPI */
1849 sync
1850 b 1b
1765 1851
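The kvmppc_read_intr helper above centralises the ICP poll that secondary_nap used to open-code. A minimal C sketch of the decision flow it implements, with hypothetical stand-ins (xirr_read, mfrr_write, eoi_write, host_ipi_pending, save_xirr_for_host) for the cache-inhibited lwzcix/stbcix/stwcix accesses the real-mode assembly performs:

    #include <stdint.h>
    #include <stdbool.h>

    #define XICS_IPI     2    /* IPI source number (assumed) */
    #define IPI_PRIORITY 4    /* assumed value, illustration only */

    /* Hypothetical stand-ins for the real-mode ICP accessors. */
    extern uint32_t xirr_read(void);
    extern void mfrr_write(uint8_t val);
    extern void eoi_write(uint32_t xirr);
    extern bool host_ipi_pending(void);
    extern void save_xirr_for_host(uint32_t xirr);

    /* 0: nothing pending, 1: host must handle it, -1: guest wakeup IPI */
    static int read_intr_sketch(void)
    {
        uint32_t xirr;

        if (host_ipi_pending())
            return 1;
        xirr = xirr_read();
        if ((xirr & 0xffffff) == 0)    /* low 24 bits: pending source */
            return 0;
        if ((xirr & 0xffffff) != XICS_IPI) {
            save_xirr_for_host(xirr);  /* host ICP driver picks it up */
            return 1;
        }
        mfrr_write(0xff);              /* clear the IPI */
        eoi_write(xirr);
        if (host_ipi_pending()) {      /* raced with the host: re-raise */
            mfrr_write(IPI_PRIORITY);
            return 1;
        }
        return -1;                     /* guest wakeup IPI, now cleared */
    }

The re-check after the EOI is the point of label 43 above: if the host set HSTATE_HOST_IPI while we were clearing the MFRR, the IPI is raised again so the host still sees it.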
1766/* 1852/*
1767 * Save away FP, VMX and VSX registers. 1853 * Save away FP, VMX and VSX registers.
@@ -1879,3 +1965,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1879 lwz r7,VCPU_VRSAVE(r4) 1965 lwz r7,VCPU_VRSAVE(r4)
1880 mtspr SPRN_VRSAVE,r7 1966 mtspr SPRN_VRSAVE,r7
1881 blr 1967 blr
1968
1969/*
1970 * We come here if we get any exception or interrupt while we are
1971 * executing host real mode code in guest MMU context.
1972 * For now just spin, but we should do something better.
1973 */
1974kvmppc_bad_host_intr:
1975 b .
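For orientation, the bolted-SLB restore that secondary_too_late performs with the .rept loop earlier in this file reads as follows in C. slbmte() is a stand-in for the instruction, the shadow save area is simplified to a pair of doublewords per entry, and SLB_ESID_V is the valid bit the andis. tests:

    #include <stdint.h>

    #define SLB_NUM_BOLTED 3                        /* as in this era */
    #define SLB_ESID_V     0x0000000008000000ULL    /* valid bit */

    struct slb_shadow_entry {
        uint64_t esid;    /* first doubleword of the save area */
        uint64_t vsid;    /* second doubleword */
    };

    extern void slbmte(uint64_t vsid, uint64_t esid);    /* stand-in */

    static void restore_bolted_slb(const struct slb_shadow_entry *shadow)
    {
        for (int i = 0; i < SLB_NUM_BOLTED; i++) {
            /* skip entries whose valid bit is clear, as beq 1f does */
            if (shadow[i].esid & SLB_ESID_V)
                slbmte(shadow[i].vsid, shadow[i].esid);
        }
    }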
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 17cfae5497a3..f4dd041c14ea 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -26,8 +26,12 @@
26 26
27#if defined(CONFIG_PPC_BOOK3S_64) 27#if defined(CONFIG_PPC_BOOK3S_64)
28#define FUNC(name) GLUE(.,name) 28#define FUNC(name) GLUE(.,name)
29#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
30
29#elif defined(CONFIG_PPC_BOOK3S_32) 31#elif defined(CONFIG_PPC_BOOK3S_32)
30#define FUNC(name) name 32#define FUNC(name) name
33#define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
34
31#endif /* CONFIG_PPC_BOOK3S_XX */ 35#endif /* CONFIG_PPC_BOOK3S_XX */
32 36
33#define VCPU_LOAD_NVGPRS(vcpu) \ 37#define VCPU_LOAD_NVGPRS(vcpu) \
@@ -87,8 +91,14 @@ kvm_start_entry:
87 VCPU_LOAD_NVGPRS(r4) 91 VCPU_LOAD_NVGPRS(r4)
88 92
89kvm_start_lightweight: 93kvm_start_lightweight:
94 /* Copy registers into shadow vcpu so we can access them in real mode */
95 GET_SHADOW_VCPU(r3)
96 bl FUNC(kvmppc_copy_to_svcpu)
97 nop
98 REST_GPR(4, r1)
90 99
91#ifdef CONFIG_PPC_BOOK3S_64 100#ifdef CONFIG_PPC_BOOK3S_64
101 /* Get the dcbz32 flag */
92 PPC_LL r3, VCPU_HFLAGS(r4) 102 PPC_LL r3, VCPU_HFLAGS(r4)
93 rldicl r3, r3, 0, 63 /* r3 &= 1 */ 103 rldicl r3, r3, 0, 63 /* r3 &= 1 */
94 stb r3, HSTATE_RESTORE_HID5(r13) 104 stb r3, HSTATE_RESTORE_HID5(r13)
@@ -111,9 +121,6 @@ kvm_start_lightweight:
111 * 121 *
112 */ 122 */
113 123
114.global kvmppc_handler_highmem
115kvmppc_handler_highmem:
116
117 /* 124 /*
118 * Register usage at this point: 125 * Register usage at this point:
119 * 126 *
@@ -125,18 +132,31 @@ kvmppc_handler_highmem:
125 * 132 *
126 */ 133 */
127 134
128 /* R7 = vcpu */ 135 /* Transfer reg values from shadow vcpu back to vcpu struct */
129 PPC_LL r7, GPR4(r1) 136 /* On 64-bit, interrupts are still off at this point */
137 PPC_LL r3, GPR4(r1) /* vcpu pointer */
138 GET_SHADOW_VCPU(r4)
139 bl FUNC(kvmppc_copy_from_svcpu)
140 nop
130 141
131#ifdef CONFIG_PPC_BOOK3S_64 142#ifdef CONFIG_PPC_BOOK3S_64
143 /* Re-enable interrupts */
144 ld r3, HSTATE_HOST_MSR(r13)
145 ori r3, r3, MSR_EE
146 MTMSR_EERI(r3)
147
132 /* 148 /*
133 * Reload kernel SPRG3 value. 149 * Reload kernel SPRG3 value.
134 * No need to save guest value as usermode can't modify SPRG3. 150 * No need to save guest value as usermode can't modify SPRG3.
135 */ 151 */
136 ld r3, PACA_SPRG3(r13) 152 ld r3, PACA_SPRG3(r13)
137 mtspr SPRN_SPRG3, r3 153 mtspr SPRN_SPRG3, r3
154
138#endif /* CONFIG_PPC_BOOK3S_64 */ 155#endif /* CONFIG_PPC_BOOK3S_64 */
139 156
157 /* R7 = vcpu */
158 PPC_LL r7, GPR4(r1)
159
140 PPC_STL r14, VCPU_GPR(R14)(r7) 160 PPC_STL r14, VCPU_GPR(R14)(r7)
141 PPC_STL r15, VCPU_GPR(R15)(r7) 161 PPC_STL r15, VCPU_GPR(R15)(r7)
142 PPC_STL r16, VCPU_GPR(R16)(r7) 162 PPC_STL r16, VCPU_GPR(R16)(r7)
@@ -161,7 +181,7 @@ kvmppc_handler_highmem:
161 181
162 /* Restore r3 (kvm_run) and r4 (vcpu) */ 182 /* Restore r3 (kvm_run) and r4 (vcpu) */
163 REST_2GPRS(3, r1) 183 REST_2GPRS(3, r1)
164 bl FUNC(kvmppc_handle_exit) 184 bl FUNC(kvmppc_handle_exit_pr)
165 185
166 /* If RESUME_GUEST, get back in the loop */ 186 /* If RESUME_GUEST, get back in the loop */
167 cmpwi r3, RESUME_GUEST 187 cmpwi r3, RESUME_GUEST
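With kvmppc_copy_to_svcpu and kvmppc_copy_from_svcpu now bracketing the real-mode entry, and the exit handler renamed kvmppc_handle_exit_pr, the guest-run loop this file implements has roughly the following shape. This is a sketch only: enter_guest_realmode and get_shadow_vcpu are invented names for the assembly in between, RESUME_GUEST's value is illustrative, and error handling is omitted:

    struct kvm_run;
    struct kvm_vcpu;
    struct shadow_vcpu;

    extern struct shadow_vcpu *get_shadow_vcpu(void);
    extern void kvmppc_copy_to_svcpu(struct shadow_vcpu *, struct kvm_vcpu *);
    extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *, struct shadow_vcpu *);
    extern int enter_guest_realmode(struct kvm_vcpu *);    /* returns exit_nr */
    extern int kvmppc_handle_exit_pr(struct kvm_run *, struct kvm_vcpu *,
                                     unsigned int exit_nr);

    #define RESUME_GUEST 1    /* illustrative value */

    static int run_loop_sketch(struct kvm_run *run, struct kvm_vcpu *vcpu)
    {
        int r;

        do {
            struct shadow_vcpu *svcpu = get_shadow_vcpu();

            kvmppc_copy_to_svcpu(svcpu, vcpu);     /* volatile regs in */
            r = enter_guest_realmode(vcpu);        /* RFI into the guest */
            kvmppc_copy_from_svcpu(vcpu, svcpu);   /* regs + fault info out */
            /* on 64-bit, interrupts are re-enabled only here, once the
             * PACA shadow state has been drained */
            r = kvmppc_handle_exit_pr(run, vcpu, r);
        } while (r == RESUME_GUEST);

        return r;
    }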
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index da8b13c4b776..5a1ab1250a05 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -28,7 +28,7 @@
28#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
29#include <asm/hw_irq.h> 29#include <asm/hw_irq.h>
30 30
31#include "trace.h" 31#include "trace_pr.h"
32 32
33#define PTE_SIZE 12 33#define PTE_SIZE 12
34 34
@@ -56,6 +56,14 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
56 HPTEG_HASH_BITS_VPTE_LONG); 56 HPTEG_HASH_BITS_VPTE_LONG);
57} 57}
58 58
59#ifdef CONFIG_PPC_BOOK3S_64
60static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
61{
62 return hash_64((vpage & 0xffffffff0ULL) >> 4,
63 HPTEG_HASH_BITS_VPTE_64K);
64}
65#endif
66
59void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 67void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
60{ 68{
61 u64 index; 69 u64 index;
@@ -83,6 +91,15 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
83 hlist_add_head_rcu(&pte->list_vpte_long, 91 hlist_add_head_rcu(&pte->list_vpte_long,
84 &vcpu3s->hpte_hash_vpte_long[index]); 92 &vcpu3s->hpte_hash_vpte_long[index]);
85 93
94#ifdef CONFIG_PPC_BOOK3S_64
95 /* Add to vPTE_64k list */
96 index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
97 hlist_add_head_rcu(&pte->list_vpte_64k,
98 &vcpu3s->hpte_hash_vpte_64k[index]);
99#endif
100
101 vcpu3s->hpte_cache_count++;
102
86 spin_unlock(&vcpu3s->mmu_lock); 103 spin_unlock(&vcpu3s->mmu_lock);
87} 104}
88 105
@@ -113,10 +130,13 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
113 hlist_del_init_rcu(&pte->list_pte_long); 130 hlist_del_init_rcu(&pte->list_pte_long);
114 hlist_del_init_rcu(&pte->list_vpte); 131 hlist_del_init_rcu(&pte->list_vpte);
115 hlist_del_init_rcu(&pte->list_vpte_long); 132 hlist_del_init_rcu(&pte->list_vpte_long);
133#ifdef CONFIG_PPC_BOOK3S_64
134 hlist_del_init_rcu(&pte->list_vpte_64k);
135#endif
136 vcpu3s->hpte_cache_count--;
116 137
117 spin_unlock(&vcpu3s->mmu_lock); 138 spin_unlock(&vcpu3s->mmu_lock);
118 139
119 vcpu3s->hpte_cache_count--;
120 call_rcu(&pte->rcu_head, free_pte_rcu); 140 call_rcu(&pte->rcu_head, free_pte_rcu);
121} 141}
122 142
@@ -219,6 +239,29 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
219 rcu_read_unlock(); 239 rcu_read_unlock();
220} 240}
221 241
242#ifdef CONFIG_PPC_BOOK3S_64
243/* Flush with mask 0xffffffff0 */
244static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
245{
246 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
247 struct hlist_head *list;
248 struct hpte_cache *pte;
249 u64 vp_mask = 0xffffffff0ULL;
250
251 list = &vcpu3s->hpte_hash_vpte_64k[
252 kvmppc_mmu_hash_vpte_64k(guest_vp)];
253
254 rcu_read_lock();
255
256 /* Check the list for matching entries and invalidate */
257 hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
258 if ((pte->pte.vpage & vp_mask) == guest_vp)
259 invalidate_pte(vcpu, pte);
260
261 rcu_read_unlock();
262}
263#endif
264
222/* Flush with mask 0xffffff000 */ 265/* Flush with mask 0xffffff000 */
223static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) 266static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
224{ 267{
@@ -249,6 +292,11 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
249 case 0xfffffffffULL: 292 case 0xfffffffffULL:
250 kvmppc_mmu_pte_vflush_short(vcpu, guest_vp); 293 kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
251 break; 294 break;
295#ifdef CONFIG_PPC_BOOK3S_64
296 case 0xffffffff0ULL:
297 kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
298 break;
299#endif
252 case 0xffffff000ULL: 300 case 0xffffff000ULL:
253 kvmppc_mmu_pte_vflush_long(vcpu, guest_vp); 301 kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
254 break; 302 break;
@@ -285,15 +333,19 @@ struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
285 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 333 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
286 struct hpte_cache *pte; 334 struct hpte_cache *pte;
287 335
288 pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
289 vcpu3s->hpte_cache_count++;
290
291 if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM) 336 if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
292 kvmppc_mmu_pte_flush_all(vcpu); 337 kvmppc_mmu_pte_flush_all(vcpu);
293 338
339 pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
340
294 return pte; 341 return pte;
295} 342}
296 343
344void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
345{
346 kmem_cache_free(hpte_cache, pte);
347}
348
297void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu) 349void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
298{ 350{
299 kvmppc_mmu_pte_flush(vcpu, 0, 0); 351 kvmppc_mmu_pte_flush(vcpu, 0, 0);
@@ -320,6 +372,10 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
320 ARRAY_SIZE(vcpu3s->hpte_hash_vpte)); 372 ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
321 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long, 373 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
322 ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long)); 374 ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
375#ifdef CONFIG_PPC_BOOK3S_64
376 kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
377 ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
378#endif
323 379
324 spin_lock_init(&vcpu3s->mmu_lock); 380 spin_lock_init(&vcpu3s->mmu_lock);
325 381
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 27db1e665959..df36cf2ed22b 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -40,8 +40,12 @@
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/module.h>
43 44
44#include "trace.h" 45#include "book3s.h"
46
47#define CREATE_TRACE_POINTS
48#include "trace_pr.h"
45 49
46/* #define EXIT_DEBUG */ 50/* #define EXIT_DEBUG */
47/* #define DEBUG_EXT */ 51/* #define DEBUG_EXT */
@@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
56#define HW_PAGE_SIZE PAGE_SIZE 60#define HW_PAGE_SIZE PAGE_SIZE
57#endif 61#endif
58 62
59void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 63static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
60{ 64{
61#ifdef CONFIG_PPC_BOOK3S_64 65#ifdef CONFIG_PPC_BOOK3S_64
62 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 66 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
63 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); 67 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
64 memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
65 sizeof(get_paca()->shadow_vcpu));
66 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; 68 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
67 svcpu_put(svcpu); 69 svcpu_put(svcpu);
68#endif 70#endif
69 vcpu->cpu = smp_processor_id(); 71 vcpu->cpu = smp_processor_id();
70#ifdef CONFIG_PPC_BOOK3S_32 72#ifdef CONFIG_PPC_BOOK3S_32
71 current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; 73 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
72#endif 74#endif
73} 75}
74 76
75void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 77static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
76{ 78{
77#ifdef CONFIG_PPC_BOOK3S_64 79#ifdef CONFIG_PPC_BOOK3S_64
78 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 80 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
79 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); 81 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
80 memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
81 sizeof(get_paca()->shadow_vcpu));
82 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; 82 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
83 svcpu_put(svcpu); 83 svcpu_put(svcpu);
84#endif 84#endif
@@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
87 vcpu->cpu = -1; 87 vcpu->cpu = -1;
88} 88}
89 89
90int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) 90/* Copy data needed by real-mode code from vcpu to shadow vcpu */
91void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
92 struct kvm_vcpu *vcpu)
93{
94 svcpu->gpr[0] = vcpu->arch.gpr[0];
95 svcpu->gpr[1] = vcpu->arch.gpr[1];
96 svcpu->gpr[2] = vcpu->arch.gpr[2];
97 svcpu->gpr[3] = vcpu->arch.gpr[3];
98 svcpu->gpr[4] = vcpu->arch.gpr[4];
99 svcpu->gpr[5] = vcpu->arch.gpr[5];
100 svcpu->gpr[6] = vcpu->arch.gpr[6];
101 svcpu->gpr[7] = vcpu->arch.gpr[7];
102 svcpu->gpr[8] = vcpu->arch.gpr[8];
103 svcpu->gpr[9] = vcpu->arch.gpr[9];
104 svcpu->gpr[10] = vcpu->arch.gpr[10];
105 svcpu->gpr[11] = vcpu->arch.gpr[11];
106 svcpu->gpr[12] = vcpu->arch.gpr[12];
107 svcpu->gpr[13] = vcpu->arch.gpr[13];
108 svcpu->cr = vcpu->arch.cr;
109 svcpu->xer = vcpu->arch.xer;
110 svcpu->ctr = vcpu->arch.ctr;
111 svcpu->lr = vcpu->arch.lr;
112 svcpu->pc = vcpu->arch.pc;
113}
114
115/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
116void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
117 struct kvmppc_book3s_shadow_vcpu *svcpu)
118{
119 vcpu->arch.gpr[0] = svcpu->gpr[0];
120 vcpu->arch.gpr[1] = svcpu->gpr[1];
121 vcpu->arch.gpr[2] = svcpu->gpr[2];
122 vcpu->arch.gpr[3] = svcpu->gpr[3];
123 vcpu->arch.gpr[4] = svcpu->gpr[4];
124 vcpu->arch.gpr[5] = svcpu->gpr[5];
125 vcpu->arch.gpr[6] = svcpu->gpr[6];
126 vcpu->arch.gpr[7] = svcpu->gpr[7];
127 vcpu->arch.gpr[8] = svcpu->gpr[8];
128 vcpu->arch.gpr[9] = svcpu->gpr[9];
129 vcpu->arch.gpr[10] = svcpu->gpr[10];
130 vcpu->arch.gpr[11] = svcpu->gpr[11];
131 vcpu->arch.gpr[12] = svcpu->gpr[12];
132 vcpu->arch.gpr[13] = svcpu->gpr[13];
133 vcpu->arch.cr = svcpu->cr;
134 vcpu->arch.xer = svcpu->xer;
135 vcpu->arch.ctr = svcpu->ctr;
136 vcpu->arch.lr = svcpu->lr;
137 vcpu->arch.pc = svcpu->pc;
138 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
139 vcpu->arch.fault_dar = svcpu->fault_dar;
140 vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
141 vcpu->arch.last_inst = svcpu->last_inst;
142}
143
144static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
91{ 145{
92 int r = 1; /* Indicate we want to get back into the guest */ 146 int r = 1; /* Indicate we want to get back into the guest */
93 147
@@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
100} 154}
101 155
102/************* MMU Notifiers *************/ 156/************* MMU Notifiers *************/
157static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
158 unsigned long end)
159{
160 long i;
161 struct kvm_vcpu *vcpu;
162 struct kvm_memslots *slots;
163 struct kvm_memory_slot *memslot;
164
165 slots = kvm_memslots(kvm);
166 kvm_for_each_memslot(memslot, slots) {
167 unsigned long hva_start, hva_end;
168 gfn_t gfn, gfn_end;
169
170 hva_start = max(start, memslot->userspace_addr);
171 hva_end = min(end, memslot->userspace_addr +
172 (memslot->npages << PAGE_SHIFT));
173 if (hva_start >= hva_end)
174 continue;
175 /*
176 * {gfn(page) | page intersects with [hva_start, hva_end)} =
177 * {gfn, gfn+1, ..., gfn_end-1}.
178 */
179 gfn = hva_to_gfn_memslot(hva_start, memslot);
180 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
181 kvm_for_each_vcpu(i, vcpu, kvm)
182 kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
183 gfn_end << PAGE_SHIFT);
184 }
185}
103 186
104int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 187static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
105{ 188{
106 trace_kvm_unmap_hva(hva); 189 trace_kvm_unmap_hva(hva);
107 190
108 /* 191 do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
109 * Flush all shadow tlb entries everywhere. This is slow, but
110 * we are 100% sure that we catch the to be unmapped page
111 */
112 kvm_flush_remote_tlbs(kvm);
113 192
114 return 0; 193 return 0;
115} 194}
116 195
117int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 196static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
197 unsigned long end)
118{ 198{
119 /* kvm_unmap_hva flushes everything anyways */ 199 do_kvm_unmap_hva(kvm, start, end);
120 kvm_unmap_hva(kvm, start);
121 200
122 return 0; 201 return 0;
123} 202}
124 203
125int kvm_age_hva(struct kvm *kvm, unsigned long hva) 204static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
126{ 205{
127 /* XXX could be more clever ;) */ 206 /* XXX could be more clever ;) */
128 return 0; 207 return 0;
129} 208}
130 209
131int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) 210static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
132{ 211{
133 /* XXX could be more clever ;) */ 212 /* XXX could be more clever ;) */
134 return 0; 213 return 0;
135} 214}
136 215
137void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) 216static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
138{ 217{
139 /* The page will get remapped properly on its next fault */ 218 /* The page will get remapped properly on its next fault */
140 kvm_unmap_hva(kvm, hva); 219 do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
141} 220}
142 221
143/*****************************************/ 222/*****************************************/
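The clipping arithmetic in do_kvm_unmap_hva above, which replaces the old flush-everything fallback with a per-memslot gfn range, can be exercised on its own. A self-contained rendering with a simplified memslot; the field names mirror the kernel's, and PAGE_SHIFT is assumed to be 12:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    struct memslot_sketch {
        uint64_t userspace_addr;    /* hva of the slot's first page */
        uint64_t base_gfn;
        uint64_t npages;
    };

    /* Returns 1 and fills [*gfn, *gfn_end) if [start, end) hits the slot. */
    static int clip_to_slot(const struct memslot_sketch *s,
                            uint64_t start, uint64_t end,
                            uint64_t *gfn, uint64_t *gfn_end)
    {
        uint64_t slot_end  = s->userspace_addr + (s->npages << PAGE_SHIFT);
        uint64_t hva_start = start > s->userspace_addr ? start
                                                       : s->userspace_addr;
        uint64_t hva_end   = end < slot_end ? end : slot_end;

        if (hva_start >= hva_end)
            return 0;
        *gfn = s->base_gfn +
               ((hva_start - s->userspace_addr) >> PAGE_SHIFT);
        /* round up so a partially covered final page is still flushed */
        *gfn_end = s->base_gfn +
                   ((hva_end + PAGE_SIZE - 1 - s->userspace_addr) >> PAGE_SHIFT);
        return 1;
    }

    int main(void)
    {
        struct memslot_sketch s = { 0x10000000, 0x100, 16 };
        uint64_t gfn, gfn_end;

        if (clip_to_slot(&s, 0x10002000, 0x10005000, &gfn, &gfn_end))
            printf("flush gfns [%#llx, %#llx)\n",
                   (unsigned long long)gfn, (unsigned long long)gfn_end);
        return 0;
    }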
@@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
159 vcpu->arch.shadow_msr = smsr; 238 vcpu->arch.shadow_msr = smsr;
160} 239}
161 240
162void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) 241static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
163{ 242{
164 ulong old_msr = vcpu->arch.shared->msr; 243 ulong old_msr = vcpu->arch.shared->msr;
165 244
@@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
219 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 298 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
220} 299}
221 300
222void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) 301void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
223{ 302{
224 u32 host_pvr; 303 u32 host_pvr;
225 304
@@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
256 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) 335 if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
257 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); 336 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
258 337
338 /*
339 * If they're asking for POWER6 or later, set the flag
340 * indicating that we can do multiple large page sizes
341 * and 1TB segments.
342 * Also set the flag that indicates that tlbie has the large
343 * page bit in the RB operand instead of the instruction.
344 */
345 switch (PVR_VER(pvr)) {
346 case PVR_POWER6:
347 case PVR_POWER7:
348 case PVR_POWER7p:
349 case PVR_POWER8:
350 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
351 BOOK3S_HFLAG_NEW_TLBIE;
352 break;
353 }
354
259#ifdef CONFIG_PPC_BOOK3S_32 355#ifdef CONFIG_PPC_BOOK3S_32
260 /* 32 bit Book3S always has 32 byte dcbz */ 356 /* 32 bit Book3S always has 32 byte dcbz */
261 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; 357 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
@@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
334 ulong eaddr, int vec) 430 ulong eaddr, int vec)
335{ 431{
336 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); 432 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
433 bool iswrite = false;
337 int r = RESUME_GUEST; 434 int r = RESUME_GUEST;
338 int relocated; 435 int relocated;
339 int page_found = 0; 436 int page_found = 0;
@@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
344 u64 vsid; 441 u64 vsid;
345 442
346 relocated = data ? dr : ir; 443 relocated = data ? dr : ir;
444 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
445 iswrite = true;
347 446
348 /* Resolve real address if translation turned on */ 447 /* Resolve real address if translation turned on */
349 if (relocated) { 448 if (relocated) {
350 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data); 449 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
351 } else { 450 } else {
352 pte.may_execute = true; 451 pte.may_execute = true;
353 pte.may_read = true; 452 pte.may_read = true;
@@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
355 pte.raddr = eaddr & KVM_PAM; 454 pte.raddr = eaddr & KVM_PAM;
356 pte.eaddr = eaddr; 455 pte.eaddr = eaddr;
357 pte.vpage = eaddr >> 12; 456 pte.vpage = eaddr >> 12;
457 pte.page_size = MMU_PAGE_64K;
358 } 458 }
359 459
360 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 460 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
@@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
388 488
389 if (page_found == -ENOENT) { 489 if (page_found == -ENOENT) {
390 /* Page not found in guest PTE entries */ 490 /* Page not found in guest PTE entries */
391 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
392 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 491 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
393 vcpu->arch.shared->dsisr = svcpu->fault_dsisr; 492 vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
394 vcpu->arch.shared->msr |= 493 vcpu->arch.shared->msr |=
395 (svcpu->shadow_srr1 & 0x00000000f8000000ULL); 494 vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
396 svcpu_put(svcpu);
397 kvmppc_book3s_queue_irqprio(vcpu, vec); 495 kvmppc_book3s_queue_irqprio(vcpu, vec);
398 } else if (page_found == -EPERM) { 496 } else if (page_found == -EPERM) {
399 /* Storage protection */ 497 /* Storage protection */
400 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
401 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 498 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
402 vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE; 499 vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
403 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; 500 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
404 vcpu->arch.shared->msr |= 501 vcpu->arch.shared->msr |=
405 svcpu->shadow_srr1 & 0x00000000f8000000ULL; 502 vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
406 svcpu_put(svcpu);
407 kvmppc_book3s_queue_irqprio(vcpu, vec); 503 kvmppc_book3s_queue_irqprio(vcpu, vec);
408 } else if (page_found == -EINVAL) { 504 } else if (page_found == -EINVAL) {
409 /* Page not found in guest SLB */ 505 /* Page not found in guest SLB */
@@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
411 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 507 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
412 } else if (!is_mmio && 508 } else if (!is_mmio &&
413 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { 509 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
510 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
511 /*
512 * There is already a host HPTE there, presumably
513 * a read-only one for a page the guest thinks
514 * is writable, so get rid of it first.
515 */
516 kvmppc_mmu_unmap_page(vcpu, &pte);
517 }
414 /* The guest's PTE is not mapped yet. Map on the host */ 518 /* The guest's PTE is not mapped yet. Map on the host */
415 kvmppc_mmu_map_page(vcpu, &pte); 519 kvmppc_mmu_map_page(vcpu, &pte, iswrite);
416 if (data) 520 if (data)
417 vcpu->stat.sp_storage++; 521 vcpu->stat.sp_storage++;
418 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 522 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
419 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) 523 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
420 kvmppc_patch_dcbz(vcpu, &pte); 524 kvmppc_patch_dcbz(vcpu, &pte);
421 } else { 525 } else {
422 /* MMIO */ 526 /* MMIO */
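Taken together with the DSISR_ISSTORE check earlier in this hunk, the fault path now amounts to a three-way dispatch: reflect the fault to the guest, map a missing page, or evict a stale read-only host HPTE before remapping. A condensed sketch; the DSISR_* values are believed to match reg.h but should be read as illustrative:

    #include <stdint.h>
    #include <stdbool.h>

    #define DSISR_NOHPTE    0x40000000u    /* no HPTE found for the access */
    #define DSISR_PROTFAULT 0x08000000u    /* protection violation */
    #define DSISR_ISSTORE   0x02000000u    /* the access was a store */

    enum fault_action { REFLECT_TO_GUEST, MAP_PAGE, UNMAP_THEN_MAP };

    static enum fault_action classify_dsi(uint32_t dsisr, bool guest_pte_ok)
    {
        if (!(dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)))
            return REFLECT_TO_GUEST;    /* not something we can fix up */
        if (!guest_pte_ok)
            return REFLECT_TO_GUEST;    /* guest takes the DSI */
        if (!(dsisr & DSISR_NOHPTE))
            /* a host HPTE exists but refused the store: it is a
             * read-only mapping of a page the guest believes is
             * writable, so drop it before mapping again */
            return UNMAP_THEN_MAP;
        return MAP_PAGE;
    }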
@@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
619 723
620 if (lost_ext & MSR_FP) 724 if (lost_ext & MSR_FP)
621 kvmppc_load_up_fpu(); 725 kvmppc_load_up_fpu();
726#ifdef CONFIG_ALTIVEC
622 if (lost_ext & MSR_VEC) 727 if (lost_ext & MSR_VEC)
623 kvmppc_load_up_altivec(); 728 kvmppc_load_up_altivec();
729#endif
624 current->thread.regs->msr |= lost_ext; 730 current->thread.regs->msr |= lost_ext;
625} 731}
626 732
627int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 733int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
628 unsigned int exit_nr) 734 unsigned int exit_nr)
629{ 735{
630 int r = RESUME_HOST; 736 int r = RESUME_HOST;
631 int s; 737 int s;
@@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
643 switch (exit_nr) { 749 switch (exit_nr) {
644 case BOOK3S_INTERRUPT_INST_STORAGE: 750 case BOOK3S_INTERRUPT_INST_STORAGE:
645 { 751 {
646 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 752 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
647 ulong shadow_srr1 = svcpu->shadow_srr1;
648 vcpu->stat.pf_instruc++; 753 vcpu->stat.pf_instruc++;
649 754
650#ifdef CONFIG_PPC_BOOK3S_32 755#ifdef CONFIG_PPC_BOOK3S_32
651 /* We mark segments as unused when invalidating them, so 756
652 * treat the corresponding fault as a segment fault. */ 757
653 if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) { 758 {
654 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 759 struct kvmppc_book3s_shadow_vcpu *svcpu;
655 r = RESUME_GUEST; 760 u32 sr;
761
762 svcpu = svcpu_get(vcpu);
763 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
656 svcpu_put(svcpu); 764 svcpu_put(svcpu);
657 break; 765 if (sr == SR_INVALID) {
766 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
767 r = RESUME_GUEST;
768 break;
769 }
658 } 770 }
659#endif 771#endif
660 svcpu_put(svcpu);
661 772
662 /* only care about PTEG not found errors, but leave NX alone */ 773 /* only care about PTEG not found errors, but leave NX alone */
663 if (shadow_srr1 & 0x40000000) { 774 if (shadow_srr1 & 0x40000000) {
775 int idx = srcu_read_lock(&vcpu->kvm->srcu);
664 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); 776 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
777 srcu_read_unlock(&vcpu->kvm->srcu, idx);
665 vcpu->stat.sp_instruc++; 778 vcpu->stat.sp_instruc++;
666 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 779 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
667 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { 780 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
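The srcu_read_lock/srcu_read_unlock pair that now wraps kvmppc_handle_pagefault is the standard SRCU read-side bracket: the memslot array the fault handler walks is SRCU-protected, the returned index tags the grace-period epoch entered, and it must be handed back on unlock. In isolation, with stubs for the kernel types:

    struct srcu_struct;

    extern int  srcu_read_lock(struct srcu_struct *);
    extern void srcu_read_unlock(struct srcu_struct *, int idx);
    extern int  handle_pagefault_stub(void);    /* walks memslots */

    static int pagefault_under_srcu(struct srcu_struct *srcu)
    {
        int idx = srcu_read_lock(srcu);
        int r = handle_pagefault_stub();

        srcu_read_unlock(srcu, idx);
        return r;
    }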
@@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
682 case BOOK3S_INTERRUPT_DATA_STORAGE: 795 case BOOK3S_INTERRUPT_DATA_STORAGE:
683 { 796 {
684 ulong dar = kvmppc_get_fault_dar(vcpu); 797 ulong dar = kvmppc_get_fault_dar(vcpu);
685 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 798 u32 fault_dsisr = vcpu->arch.fault_dsisr;
686 u32 fault_dsisr = svcpu->fault_dsisr;
687 vcpu->stat.pf_storage++; 799 vcpu->stat.pf_storage++;
688 800
689#ifdef CONFIG_PPC_BOOK3S_32 801#ifdef CONFIG_PPC_BOOK3S_32
690 /* We mark segments as unused when invalidating them, so 802
691 * treat the corresponding fault as a segment fault. */ 803
692 if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) { 804 {
693 kvmppc_mmu_map_segment(vcpu, dar); 805 struct kvmppc_book3s_shadow_vcpu *svcpu;
694 r = RESUME_GUEST; 806 u32 sr;
807
808 svcpu = svcpu_get(vcpu);
809 sr = svcpu->sr[dar >> SID_SHIFT];
695 svcpu_put(svcpu); 810 svcpu_put(svcpu);
696 break; 811 if (sr == SR_INVALID) {
812 kvmppc_mmu_map_segment(vcpu, dar);
813 r = RESUME_GUEST;
814 break;
815 }
697 } 816 }
698#endif 817#endif
699 svcpu_put(svcpu);
700 818
701 /* The only case we need to handle is missing shadow PTEs */ 819 /*
702 if (fault_dsisr & DSISR_NOHPTE) { 820 * We need to handle missing shadow PTEs, and
821 * protection faults due to us mapping a page read-only
822 * when the guest thinks it is writable.
823 */
824 if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
825 int idx = srcu_read_lock(&vcpu->kvm->srcu);
703 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); 826 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
827 srcu_read_unlock(&vcpu->kvm->srcu, idx);
704 } else { 828 } else {
705 vcpu->arch.shared->dar = dar; 829 vcpu->arch.shared->dar = dar;
706 vcpu->arch.shared->dsisr = fault_dsisr; 830 vcpu->arch.shared->dsisr = fault_dsisr;
@@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
743 case BOOK3S_INTERRUPT_H_EMUL_ASSIST: 867 case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
744 { 868 {
745 enum emulation_result er; 869 enum emulation_result er;
746 struct kvmppc_book3s_shadow_vcpu *svcpu;
747 ulong flags; 870 ulong flags;
748 871
749program_interrupt: 872program_interrupt:
750 svcpu = svcpu_get(vcpu); 873 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
751 flags = svcpu->shadow_srr1 & 0x1f0000ull;
752 svcpu_put(svcpu);
753 874
754 if (vcpu->arch.shared->msr & MSR_PR) { 875 if (vcpu->arch.shared->msr & MSR_PR) {
755#ifdef EXIT_DEBUG 876#ifdef EXIT_DEBUG
@@ -798,7 +919,7 @@ program_interrupt:
798 ulong cmd = kvmppc_get_gpr(vcpu, 3); 919 ulong cmd = kvmppc_get_gpr(vcpu, 3);
799 int i; 920 int i;
800 921
801#ifdef CONFIG_KVM_BOOK3S_64_PR 922#ifdef CONFIG_PPC_BOOK3S_64
802 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { 923 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
803 r = RESUME_GUEST; 924 r = RESUME_GUEST;
804 break; 925 break;
@@ -881,9 +1002,7 @@ program_interrupt:
881 break; 1002 break;
882 default: 1003 default:
883 { 1004 {
884 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 1005 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
885 ulong shadow_srr1 = svcpu->shadow_srr1;
886 svcpu_put(svcpu);
887 /* Ugh - bork here! What did we get? */ 1006 /* Ugh - bork here! What did we get? */
888 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", 1007 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
889 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); 1008 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -920,8 +1039,8 @@ program_interrupt:
920 return r; 1039 return r;
921} 1040}
922 1041
923int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 1042static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
924 struct kvm_sregs *sregs) 1043 struct kvm_sregs *sregs)
925{ 1044{
926 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1045 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
927 int i; 1046 int i;
@@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
947 return 0; 1066 return 0;
948} 1067}
949 1068
950int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1069static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
951 struct kvm_sregs *sregs) 1070 struct kvm_sregs *sregs)
952{ 1071{
953 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 1072 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
954 int i; 1073 int i;
955 1074
956 kvmppc_set_pvr(vcpu, sregs->pvr); 1075 kvmppc_set_pvr_pr(vcpu, sregs->pvr);
957 1076
958 vcpu3s->sdr1 = sregs->u.s.sdr1; 1077 vcpu3s->sdr1 = sregs->u.s.sdr1;
959 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { 1078 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
@@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
983 return 0; 1102 return 0;
984} 1103}
985 1104
986int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) 1105static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1106 union kvmppc_one_reg *val)
987{ 1107{
988 int r = 0; 1108 int r = 0;
989 1109
@@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
1012 return r; 1132 return r;
1013} 1133}
1014 1134
1015int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) 1135static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1136 union kvmppc_one_reg *val)
1016{ 1137{
1017 int r = 0; 1138 int r = 0;
1018 1139
@@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
1042 return r; 1163 return r;
1043} 1164}
1044 1165
1045int kvmppc_core_check_processor_compat(void) 1166static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1046{ 1167 unsigned int id)
1047 return 0;
1048}
1049
1050struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1051{ 1168{
1052 struct kvmppc_vcpu_book3s *vcpu_book3s; 1169 struct kvmppc_vcpu_book3s *vcpu_book3s;
1053 struct kvm_vcpu *vcpu; 1170 struct kvm_vcpu *vcpu;
1054 int err = -ENOMEM; 1171 int err = -ENOMEM;
1055 unsigned long p; 1172 unsigned long p;
1056 1173
1057 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); 1174 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1058 if (!vcpu_book3s) 1175 if (!vcpu)
1059 goto out; 1176 goto out;
1060 1177
1061 vcpu_book3s->shadow_vcpu = 1178 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
1062 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); 1179 if (!vcpu_book3s)
1063 if (!vcpu_book3s->shadow_vcpu)
1064 goto free_vcpu; 1180 goto free_vcpu;
1181 vcpu->arch.book3s = vcpu_book3s;
1182
1183#ifdef CONFIG_KVM_BOOK3S_32
1184 vcpu->arch.shadow_vcpu =
1185 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1186 if (!vcpu->arch.shadow_vcpu)
1187 goto free_vcpu3s;
1188#endif
1065 1189
1066 vcpu = &vcpu_book3s->vcpu;
1067 err = kvm_vcpu_init(vcpu, kvm, id); 1190 err = kvm_vcpu_init(vcpu, kvm, id);
1068 if (err) 1191 if (err)
1069 goto free_shadow_vcpu; 1192 goto free_shadow_vcpu;
@@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1076 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); 1199 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
1077 1200
1078#ifdef CONFIG_PPC_BOOK3S_64 1201#ifdef CONFIG_PPC_BOOK3S_64
1079 /* default to book3s_64 (970fx) */ 1202 /*
1203 * Default to the same as the host if we're on sufficiently
1204 * recent machine that we have 1TB segments;
1205 * otherwise default to PPC970FX.
1206 */
1080 vcpu->arch.pvr = 0x3C0301; 1207 vcpu->arch.pvr = 0x3C0301;
1208 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1209 vcpu->arch.pvr = mfspr(SPRN_PVR);
1081#else 1210#else
1082 /* default to book3s_32 (750) */ 1211 /* default to book3s_32 (750) */
1083 vcpu->arch.pvr = 0x84202; 1212 vcpu->arch.pvr = 0x84202;
1084#endif 1213#endif
1085 kvmppc_set_pvr(vcpu, vcpu->arch.pvr); 1214 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1086 vcpu->arch.slb_nr = 64; 1215 vcpu->arch.slb_nr = 64;
1087 1216
1088 vcpu->arch.shadow_msr = MSR_USER64; 1217 vcpu->arch.shadow_msr = MSR_USER64;
@@ -1096,24 +1225,31 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1096uninit_vcpu: 1225uninit_vcpu:
1097 kvm_vcpu_uninit(vcpu); 1226 kvm_vcpu_uninit(vcpu);
1098free_shadow_vcpu: 1227free_shadow_vcpu:
1099 kfree(vcpu_book3s->shadow_vcpu); 1228#ifdef CONFIG_KVM_BOOK3S_32
1100free_vcpu: 1229 kfree(vcpu->arch.shadow_vcpu);
1230free_vcpu3s:
1231#endif
1101 vfree(vcpu_book3s); 1232 vfree(vcpu_book3s);
1233free_vcpu:
1234 kmem_cache_free(kvm_vcpu_cache, vcpu);
1102out: 1235out:
1103 return ERR_PTR(err); 1236 return ERR_PTR(err);
1104} 1237}
1105 1238
1106void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 1239static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1107{ 1240{
1108 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 1241 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1109 1242
1110 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); 1243 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1111 kvm_vcpu_uninit(vcpu); 1244 kvm_vcpu_uninit(vcpu);
1112 kfree(vcpu_book3s->shadow_vcpu); 1245#ifdef CONFIG_KVM_BOOK3S_32
1246 kfree(vcpu->arch.shadow_vcpu);
1247#endif
1113 vfree(vcpu_book3s); 1248 vfree(vcpu_book3s);
1249 kmem_cache_free(kvm_vcpu_cache, vcpu);
1114} 1250}
1115 1251
1116int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1252static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1117{ 1253{
1118 int ret; 1254 int ret;
1119 double fpr[32][TS_FPRWIDTH]; 1255 double fpr[32][TS_FPRWIDTH];
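The reworked allocation order in kvmppc_core_vcpu_create_pr, with its layered unwind labels above, is the usual goto-cleanup idiom: each successfully acquired resource gets its own label, so a failure frees exactly what already exists. A userspace rendering, with calloc/free standing in for the slab, vzalloc and kzalloc calls and placeholder sizes:

    #include <stdlib.h>

    struct vcpu_sketch {
        void *book3s;
        void *shadow;    /* 32-bit only in the real code */
    };

    static struct vcpu_sketch *vcpu_create_sketch(void)
    {
        struct vcpu_sketch *vcpu = calloc(1, sizeof(*vcpu));    /* slab */
        if (!vcpu)
            goto out;
        vcpu->book3s = calloc(1, 4096);    /* vzalloc in the real code */
        if (!vcpu->book3s)
            goto free_vcpu;
        vcpu->shadow = calloc(1, 256);     /* kzalloc, CONFIG_KVM_BOOK3S_32 */
        if (!vcpu->shadow)
            goto free_vcpu3s;
        return vcpu;

    free_vcpu3s:
        free(vcpu->book3s);
    free_vcpu:
        free(vcpu);
    out:
        return NULL;
    }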
@@ -1222,8 +1358,8 @@ out:
1222/* 1358/*
1223 * Get (and clear) the dirty memory log for a memory slot. 1359 * Get (and clear) the dirty memory log for a memory slot.
1224 */ 1360 */
1225int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 1361static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
1226 struct kvm_dirty_log *log) 1362 struct kvm_dirty_log *log)
1227{ 1363{
1228 struct kvm_memory_slot *memslot; 1364 struct kvm_memory_slot *memslot;
1229 struct kvm_vcpu *vcpu; 1365 struct kvm_vcpu *vcpu;
@@ -1258,67 +1394,100 @@ out:
1258 return r; 1394 return r;
1259} 1395}
1260 1396
1261#ifdef CONFIG_PPC64 1397static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
1262int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) 1398 struct kvm_memory_slot *memslot)
1263{ 1399{
1264 info->flags = KVM_PPC_1T_SEGMENTS; 1400 return;
1265 1401}
1266 /* SLB is always 64 entries */
1267 info->slb_size = 64;
1268
1269 /* Standard 4k base page size segment */
1270 info->sps[0].page_shift = 12;
1271 info->sps[0].slb_enc = 0;
1272 info->sps[0].enc[0].page_shift = 12;
1273 info->sps[0].enc[0].pte_enc = 0;
1274
1275 /* Standard 16M large page size segment */
1276 info->sps[1].page_shift = 24;
1277 info->sps[1].slb_enc = SLB_VSID_L;
1278 info->sps[1].enc[0].page_shift = 24;
1279 info->sps[1].enc[0].pte_enc = 0;
1280 1402
1403static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
1404 struct kvm_memory_slot *memslot,
1405 struct kvm_userspace_memory_region *mem)
1406{
1281 return 0; 1407 return 0;
1282} 1408}
1283#endif /* CONFIG_PPC64 */
1284 1409
1285void kvmppc_core_free_memslot(struct kvm_memory_slot *free, 1410static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
1286 struct kvm_memory_slot *dont) 1411 struct kvm_userspace_memory_region *mem,
1412 const struct kvm_memory_slot *old)
1287{ 1413{
1414 return;
1288} 1415}
1289 1416
1290int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, 1417static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
1291 unsigned long npages) 1418 struct kvm_memory_slot *dont)
1292{ 1419{
1293 return 0; 1420 return;
1294} 1421}
1295 1422
1296int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1423static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
1297 struct kvm_memory_slot *memslot, 1424 unsigned long npages)
1298 struct kvm_userspace_memory_region *mem)
1299{ 1425{
1300 return 0; 1426 return 0;
1301} 1427}
1302 1428
1303void kvmppc_core_commit_memory_region(struct kvm *kvm, 1429
1304 struct kvm_userspace_memory_region *mem, 1430#ifdef CONFIG_PPC64
1305 const struct kvm_memory_slot *old) 1431static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1432 struct kvm_ppc_smmu_info *info)
1306{ 1433{
1307} 1434 long int i;
1435 struct kvm_vcpu *vcpu;
1436
1437 info->flags = 0;
1438
1439 /* SLB is always 64 entries */
1440 info->slb_size = 64;
1441
1442 /* Standard 4k base page size segment */
1443 info->sps[0].page_shift = 12;
1444 info->sps[0].slb_enc = 0;
1445 info->sps[0].enc[0].page_shift = 12;
1446 info->sps[0].enc[0].pte_enc = 0;
1447
1448 /*
1449 * 64k large page size.
1450 * We only want to put this in if the CPUs we're emulating
1451 * support it, but unfortunately we don't have a vcpu
1452 * to hand here to test with. Just pick the first vcpu, and if
1453 * that doesn't exist yet, report the minimum capability,
1454 * i.e., no 64k pages.
1455 * 1T segment support goes along with 64k pages.
1456 */
1457 i = 1;
1458 vcpu = kvm_get_vcpu(kvm, 0);
1459 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
1460 info->flags = KVM_PPC_1T_SEGMENTS;
1461 info->sps[i].page_shift = 16;
1462 info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
1463 info->sps[i].enc[0].page_shift = 16;
1464 info->sps[i].enc[0].pte_enc = 1;
1465 ++i;
1466 }
1467
1468 /* Standard 16M large page size segment */
1469 info->sps[i].page_shift = 24;
1470 info->sps[i].slb_enc = SLB_VSID_L;
1471 info->sps[i].enc[0].page_shift = 24;
1472 info->sps[i].enc[0].pte_enc = 0;
1308 1473
1309void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) 1474 return 0;
1475}
1476#else
1477static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1478 struct kvm_ppc_smmu_info *info)
1310{ 1479{
1480 /* We should not get called */
1481 BUG();
1311} 1482}
1483#endif /* CONFIG_PPC64 */
1312 1484
1313static unsigned int kvm_global_user_count = 0; 1485static unsigned int kvm_global_user_count = 0;
1314static DEFINE_SPINLOCK(kvm_global_user_count_lock); 1486static DEFINE_SPINLOCK(kvm_global_user_count_lock);
1315 1487
1316int kvmppc_core_init_vm(struct kvm *kvm) 1488static int kvmppc_core_init_vm_pr(struct kvm *kvm)
1317{ 1489{
1318#ifdef CONFIG_PPC64 1490 mutex_init(&kvm->arch.hpt_mutex);
1319 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
1320 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
1321#endif
1322 1491
1323 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 1492 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
1324 spin_lock(&kvm_global_user_count_lock); 1493 spin_lock(&kvm_global_user_count_lock);
@@ -1329,7 +1498,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1329 return 0; 1498 return 0;
1330} 1499}
1331 1500
1332void kvmppc_core_destroy_vm(struct kvm *kvm) 1501static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
1333{ 1502{
1334#ifdef CONFIG_PPC64 1503#ifdef CONFIG_PPC64
1335 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); 1504 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
@@ -1344,26 +1513,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
1344 } 1513 }
1345} 1514}
1346 1515
1347static int kvmppc_book3s_init(void) 1516static int kvmppc_core_check_processor_compat_pr(void)
1348{ 1517{
1349 int r; 1518 /* we are always compatible */
1519 return 0;
1520}
1350 1521
1351 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0, 1522static long kvm_arch_vm_ioctl_pr(struct file *filp,
1352 THIS_MODULE); 1523 unsigned int ioctl, unsigned long arg)
1524{
1525 return -ENOTTY;
1526}
1353 1527
1354 if (r) 1528static struct kvmppc_ops kvm_ops_pr = {
1529 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
1530 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
1531 .get_one_reg = kvmppc_get_one_reg_pr,
1532 .set_one_reg = kvmppc_set_one_reg_pr,
1533 .vcpu_load = kvmppc_core_vcpu_load_pr,
1534 .vcpu_put = kvmppc_core_vcpu_put_pr,
1535 .set_msr = kvmppc_set_msr_pr,
1536 .vcpu_run = kvmppc_vcpu_run_pr,
1537 .vcpu_create = kvmppc_core_vcpu_create_pr,
1538 .vcpu_free = kvmppc_core_vcpu_free_pr,
1539 .check_requests = kvmppc_core_check_requests_pr,
1540 .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
1541 .flush_memslot = kvmppc_core_flush_memslot_pr,
1542 .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
1543 .commit_memory_region = kvmppc_core_commit_memory_region_pr,
1544 .unmap_hva = kvm_unmap_hva_pr,
1545 .unmap_hva_range = kvm_unmap_hva_range_pr,
1546 .age_hva = kvm_age_hva_pr,
1547 .test_age_hva = kvm_test_age_hva_pr,
1548 .set_spte_hva = kvm_set_spte_hva_pr,
1549 .mmu_destroy = kvmppc_mmu_destroy_pr,
1550 .free_memslot = kvmppc_core_free_memslot_pr,
1551 .create_memslot = kvmppc_core_create_memslot_pr,
1552 .init_vm = kvmppc_core_init_vm_pr,
1553 .destroy_vm = kvmppc_core_destroy_vm_pr,
1554 .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
1555 .emulate_op = kvmppc_core_emulate_op_pr,
1556 .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
1557 .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
1558 .fast_vcpu_kick = kvm_vcpu_kick,
1559 .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
1560};
1561
1562
1563int kvmppc_book3s_init_pr(void)
1564{
1565 int r;
1566
1567 r = kvmppc_core_check_processor_compat_pr();
1568 if (r < 0)
1355 return r; 1569 return r;
1356 1570
1357 r = kvmppc_mmu_hpte_sysinit(); 1571 kvm_ops_pr.owner = THIS_MODULE;
1572 kvmppc_pr_ops = &kvm_ops_pr;
1358 1573
1574 r = kvmppc_mmu_hpte_sysinit();
1359 return r; 1575 return r;
1360} 1576}
1361 1577
1362static void kvmppc_book3s_exit(void) 1578void kvmppc_book3s_exit_pr(void)
1363{ 1579{
1580 kvmppc_pr_ops = NULL;
1364 kvmppc_mmu_hpte_sysexit(); 1581 kvmppc_mmu_hpte_sysexit();
1365 kvm_exit();
1366} 1582}
1367 1583
1368module_init(kvmppc_book3s_init); 1584/*
1369module_exit(kvmppc_book3s_exit); 1585 * We only support separate modules for book3s 64
1586 */
1587#ifdef CONFIG_PPC_BOOK3S_64
1588
1589module_init(kvmppc_book3s_init_pr);
1590module_exit(kvmppc_book3s_exit_pr);
1591
1592MODULE_LICENSE("GPL");
1593#endif
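The kvm_ops_pr table and the init path above are the heart of this series: the PR backend no longer defines the kvmppc_core_* entry points directly but publishes them through a struct of function pointers, so HV and PR can coexist and be built as separate modules. A stripped-down model of the registration pattern, using a hypothetical two-entry ops struct rather than the real kvmppc_ops:

    #include <stddef.h>
    #include <stdio.h>

    struct kvm_vcpu_sketch;

    /* Hypothetical miniature of struct kvmppc_ops. */
    struct kvmppc_ops_sketch {
        const char *name;
        int  (*check_processor_compat)(void);
        void (*vcpu_load)(struct kvm_vcpu_sketch *, int cpu);
    };

    static struct kvmppc_ops_sketch *pr_ops_published;

    static int pr_compat(void) { return 0; }    /* always compatible */

    static void pr_vcpu_load(struct kvm_vcpu_sketch *v, int cpu)
    {
        (void)v;
        printf("PR backend loading vcpu on cpu %d\n", cpu);
    }

    static struct kvmppc_ops_sketch kvm_ops_pr_sketch = {
        .name = "kvm-pr",
        .check_processor_compat = pr_compat,
        .vcpu_load = pr_vcpu_load,
    };

    /* What module_init does: probe first, then publish the table. */
    static int init_pr_sketch(void)
    {
        int r = kvm_ops_pr_sketch.check_processor_compat();
        if (r < 0)
            return r;
        pr_ops_published = &kvm_ops_pr_sketch;
        return 0;
    }

    int main(void)
    {
        if (init_pr_sketch() == 0)
            pr_ops_published->vcpu_load(NULL, 3);
        return 0;
    }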
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index da0e0bc268bd..5efa97b993d8 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -21,6 +21,8 @@
21#include <asm/kvm_ppc.h> 21#include <asm/kvm_ppc.h>
22#include <asm/kvm_book3s.h> 22#include <asm/kvm_book3s.h>
23 23
24#define HPTE_SIZE 16 /* bytes per HPT entry */
25
24static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) 26static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
25{ 27{
26 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 28 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -40,32 +42,41 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
40 long pte_index = kvmppc_get_gpr(vcpu, 5); 42 long pte_index = kvmppc_get_gpr(vcpu, 5);
41 unsigned long pteg[2 * 8]; 43 unsigned long pteg[2 * 8];
42 unsigned long pteg_addr, i, *hpte; 44 unsigned long pteg_addr, i, *hpte;
45 long int ret;
43 46
47 i = pte_index & 7;
44 pte_index &= ~7UL; 48 pte_index &= ~7UL;
45 pteg_addr = get_pteg_addr(vcpu, pte_index); 49 pteg_addr = get_pteg_addr(vcpu, pte_index);
46 50
51 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
47 copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); 52 copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
48 hpte = pteg; 53 hpte = pteg;
49 54
55 ret = H_PTEG_FULL;
50 if (likely((flags & H_EXACT) == 0)) { 56 if (likely((flags & H_EXACT) == 0)) {
51 pte_index &= ~7UL;
52 for (i = 0; ; ++i) { 57 for (i = 0; ; ++i) {
53 if (i == 8) 58 if (i == 8)
54 return H_PTEG_FULL; 59 goto done;
55 if ((*hpte & HPTE_V_VALID) == 0) 60 if ((*hpte & HPTE_V_VALID) == 0)
56 break; 61 break;
57 hpte += 2; 62 hpte += 2;
58 } 63 }
59 } else { 64 } else {
60 i = kvmppc_get_gpr(vcpu, 5) & 7UL;
61 hpte += i * 2; 65 hpte += i * 2;
66 if (*hpte & HPTE_V_VALID)
67 goto done;
62 } 68 }
63 69
64 hpte[0] = kvmppc_get_gpr(vcpu, 6); 70 hpte[0] = kvmppc_get_gpr(vcpu, 6);
65 hpte[1] = kvmppc_get_gpr(vcpu, 7); 71 hpte[1] = kvmppc_get_gpr(vcpu, 7);
66 copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg)); 72 pteg_addr += i * HPTE_SIZE;
67 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); 73 copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
68 kvmppc_set_gpr(vcpu, 4, pte_index | i); 74 kvmppc_set_gpr(vcpu, 4, pte_index | i);
75 ret = H_SUCCESS;
76
77 done:
78 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
79 kvmppc_set_gpr(vcpu, 3, ret);
69 80
70 return EMULATE_DONE; 81 return EMULATE_DONE;
71} 82}
@@ -77,26 +88,31 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
77 unsigned long avpn = kvmppc_get_gpr(vcpu, 6); 88 unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
78 unsigned long v = 0, pteg, rb; 89 unsigned long v = 0, pteg, rb;
79 unsigned long pte[2]; 90 unsigned long pte[2];
91 long int ret;
80 92
81 pteg = get_pteg_addr(vcpu, pte_index); 93 pteg = get_pteg_addr(vcpu, pte_index);
94 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
82 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 95 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
83 96
97 ret = H_NOT_FOUND;
84 if ((pte[0] & HPTE_V_VALID) == 0 || 98 if ((pte[0] & HPTE_V_VALID) == 0 ||
85 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || 99 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
86 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) { 100 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
87 kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); 101 goto done;
88 return EMULATE_DONE;
89 }
90 102
91 copy_to_user((void __user *)pteg, &v, sizeof(v)); 103 copy_to_user((void __user *)pteg, &v, sizeof(v));
92 104
93 rb = compute_tlbie_rb(pte[0], pte[1], pte_index); 105 rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
94 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 106 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
95 107
96 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); 108 ret = H_SUCCESS;
97 kvmppc_set_gpr(vcpu, 4, pte[0]); 109 kvmppc_set_gpr(vcpu, 4, pte[0]);
98 kvmppc_set_gpr(vcpu, 5, pte[1]); 110 kvmppc_set_gpr(vcpu, 5, pte[1]);
99 111
112 done:
113 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
114 kvmppc_set_gpr(vcpu, 3, ret);
115
100 return EMULATE_DONE; 116 return EMULATE_DONE;
101} 117}
102 118
@@ -124,6 +140,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
124 int paramnr = 4; 140 int paramnr = 4;
125 int ret = H_SUCCESS; 141 int ret = H_SUCCESS;
126 142
143 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
127 for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { 144 for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
128 unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); 145 unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
129 unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); 146 unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
@@ -172,6 +189,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
172 } 189 }
173 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); 190 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
174 } 191 }
192 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
175 kvmppc_set_gpr(vcpu, 3, ret); 193 kvmppc_set_gpr(vcpu, 3, ret);
176 194
177 return EMULATE_DONE; 195 return EMULATE_DONE;
@@ -184,15 +202,16 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
184 unsigned long avpn = kvmppc_get_gpr(vcpu, 6); 202 unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
185 unsigned long rb, pteg, r, v; 203 unsigned long rb, pteg, r, v;
186 unsigned long pte[2]; 204 unsigned long pte[2];
205 long int ret;
187 206
188 pteg = get_pteg_addr(vcpu, pte_index); 207 pteg = get_pteg_addr(vcpu, pte_index);
208 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
189 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 209 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
190 210
211 ret = H_NOT_FOUND;
191 if ((pte[0] & HPTE_V_VALID) == 0 || 212 if ((pte[0] & HPTE_V_VALID) == 0 ||
192 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) { 213 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
193 kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); 214 goto done;
194 return EMULATE_DONE;
195 }
196 215
197 v = pte[0]; 216 v = pte[0];
198 r = pte[1]; 217 r = pte[1];
@@ -207,8 +226,11 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
207 rb = compute_tlbie_rb(v, r, pte_index); 226 rb = compute_tlbie_rb(v, r, pte_index);
208 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 227 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
209 copy_to_user((void __user *)pteg, pte, sizeof(pte)); 228 copy_to_user((void __user *)pteg, pte, sizeof(pte));
229 ret = H_SUCCESS;
210 230
211 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); 231 done:
232 mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
233 kvmppc_set_gpr(vcpu, 3, ret);
212 234
213 return EMULATE_DONE; 235 return EMULATE_DONE;
214} 236}
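All four hcall emulations now serialise on kvm->arch.hpt_mutex, since the PR guest's HPT lives in guest memory and two vcpus could otherwise race on the same PTEG. The reworked H_ENTER scan reduces to the following over an in-memory PTEG; HPTE_V_VALID, H_EXACT and the return codes are illustrative stand-ins, not the real hvcall.h encodings:

    #include <stdint.h>

    #define HPTE_V_VALID 1ULL    /* assumed valid bit, illustration only */
    #define H_EXACT      4       /* flag: caller chose the slot (assumed) */
    #define H_SUCCESS    0
    #define H_PTEG_FULL  (-1)    /* illustrative value */

    /* One PTEG: eight entries of two doublewords each. */
    static long h_enter_scan(uint64_t pteg[16], unsigned long flags,
                             unsigned long pte_index, uint64_t v, uint64_t r,
                             unsigned long *slot_out)
    {
        unsigned long i = pte_index & 7;    /* slot within the PTEG */

        if (!(flags & H_EXACT)) {
            for (i = 0; i < 8; i++)
                if (!(pteg[i * 2] & HPTE_V_VALID))
                    break;
            if (i == 8)
                return H_PTEG_FULL;
        } else if (pteg[i * 2] & HPTE_V_VALID) {
            /* the requested slot is occupied: now also H_PTEG_FULL */
            return H_PTEG_FULL;
        }

        pteg[i * 2]     = v;
        pteg[i * 2 + 1] = r;
        *slot_out = (pte_index & ~7UL) | i;
        return H_SUCCESS;
    }

Note the ordering the patch also fixes: only the claimed 16-byte entry is written back (pteg_addr + i * HPTE_SIZE) rather than the whole PTEG, and the mutex is held across the read-scan-write sequence.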
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 8f7633e3afb8..a38c4c9edab8 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -38,32 +38,6 @@
38 38
39#define FUNC(name) GLUE(.,name) 39#define FUNC(name) GLUE(.,name)
40 40
41 .globl kvmppc_skip_interrupt
42kvmppc_skip_interrupt:
43 /*
44 * Here all GPRs are unchanged from when the interrupt happened
45 * except for r13, which is saved in SPRG_SCRATCH0.
46 */
47 mfspr r13, SPRN_SRR0
48 addi r13, r13, 4
49 mtspr SPRN_SRR0, r13
50 GET_SCRATCH0(r13)
51 rfid
52 b .
53
54 .globl kvmppc_skip_Hinterrupt
55kvmppc_skip_Hinterrupt:
56 /*
57 * Here all GPRs are unchanged from when the interrupt happened
58 * except for r13, which is saved in SPRG_SCRATCH0.
59 */
60 mfspr r13, SPRN_HSRR0
61 addi r13, r13, 4
62 mtspr SPRN_HSRR0, r13
63 GET_SCRATCH0(r13)
64 hrfid
65 b .
66
67#elif defined(CONFIG_PPC_BOOK3S_32) 41#elif defined(CONFIG_PPC_BOOK3S_32)
68 42
69#define FUNC(name) name 43#define FUNC(name) name
@@ -179,11 +153,15 @@ _GLOBAL(kvmppc_entry_trampoline)
179 153
180 li r6, MSR_IR | MSR_DR 154 li r6, MSR_IR | MSR_DR
181 andc r6, r5, r6 /* Clear DR and IR in MSR value */ 155 andc r6, r5, r6 /* Clear DR and IR in MSR value */
156#ifdef CONFIG_PPC_BOOK3S_32
182 /* 157 /*
183 * Set EE in HOST_MSR so that it's enabled when we get into our 158 * Set EE in HOST_MSR so that it's enabled when we get into our
184 * C exit handler function 159 * C exit handler function. On 64-bit we delay enabling
160 * interrupts until we have finished transferring stuff
161 * to or from the PACA.
185 */ 162 */
186 ori r5, r5, MSR_EE 163 ori r5, r5, MSR_EE
164#endif
187 mtsrr0 r7 165 mtsrr0 r7
188 mtsrr1 r6 166 mtsrr1 r6
189 RFI 167 RFI
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 3219ba895246..cf95cdef73c9 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -260,6 +260,7 @@ fail:
260 */ 260 */
261 return rc; 261 return rc;
262} 262}
263EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);
263 264
264void kvmppc_rtas_tokens_free(struct kvm *kvm) 265void kvmppc_rtas_tokens_free(struct kvm *kvm)
265{ 266{
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1abe4788191a..bc50c97751d3 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end:
161.global kvmppc_handler_trampoline_exit 161.global kvmppc_handler_trampoline_exit
162kvmppc_handler_trampoline_exit: 162kvmppc_handler_trampoline_exit:
163 163
164.global kvmppc_interrupt 164.global kvmppc_interrupt_pr
165kvmppc_interrupt: 165kvmppc_interrupt_pr:
166 166
167 /* Register usage at this point: 167 /* Register usage at this point:
168 * 168 *
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index a3a5cb8ee7ea..02a17dcf1610 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
818 } 818 }
819 819
820 /* Check for real mode returning too hard */ 820 /* Check for real mode returning too hard */
821 if (xics->real_mode) 821 if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
822 return kvmppc_xics_rm_complete(vcpu, req); 822 return kvmppc_xics_rm_complete(vcpu, req);
823 823
824 switch (req) { 824 switch (req) {
@@ -840,6 +840,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
840 840
841 return rc; 841 return rc;
842} 842}
843EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
843 844
844 845
845/* -- Initialisation code etc. -- */ 846/* -- Initialisation code etc. -- */
@@ -1250,13 +1251,13 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
1250 1251
1251 xics_debugfs_init(xics); 1252 xics_debugfs_init(xics);
1252 1253
1253#ifdef CONFIG_KVM_BOOK3S_64_HV 1254#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1254 if (cpu_has_feature(CPU_FTR_ARCH_206)) { 1255 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1255 /* Enable real mode support */ 1256 /* Enable real mode support */
1256 xics->real_mode = ENABLE_REALMODE; 1257 xics->real_mode = ENABLE_REALMODE;
1257 xics->real_mode_dbg = DEBUG_REALMODE; 1258 xics->real_mode_dbg = DEBUG_REALMODE;
1258 } 1259 }
1259#endif /* CONFIG_KVM_BOOK3S_64_HV */ 1260#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1260 1261
1261 return 0; 1262 return 0;
1262} 1263}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 17722d82f1d1..15d0149511eb 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -40,7 +40,9 @@
40 40
41#include "timing.h" 41#include "timing.h"
42#include "booke.h" 42#include "booke.h"
43#include "trace.h" 43
44#define CREATE_TRACE_POINTS
45#include "trace_booke.h"
44 46
45unsigned long kvmppc_booke_handlers; 47unsigned long kvmppc_booke_handlers;
46 48
@@ -133,6 +135,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
133#endif 135#endif
134} 136}
135 137
138static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
139{
140 /* Synchronize guest's desire to get debug interrupts into shadow MSR */
141#ifndef CONFIG_KVM_BOOKE_HV
142 vcpu->arch.shadow_msr &= ~MSR_DE;
143 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
144#endif
145
146 /* Force enable debug interrupts when user space wants to debug */
147 if (vcpu->guest_debug) {
148#ifdef CONFIG_KVM_BOOKE_HV
149 /*
150 * Since there is no shadow MSR, sync MSR_DE into the guest
151 * visible MSR.
152 */
153 vcpu->arch.shared->msr |= MSR_DE;
154#else
155 vcpu->arch.shadow_msr |= MSR_DE;
156 vcpu->arch.shared->msr &= ~MSR_DE;
157#endif
158 }
159}
160
136/* 161/*
137 * Helper function for "full" MSR writes. No need to call this if only 162 * Helper function for "full" MSR writes. No need to call this if only
138 * EE/CE/ME/DE/RI are changing. 163 * EE/CE/ME/DE/RI are changing.
@@ -150,6 +175,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
150 kvmppc_mmu_msr_notify(vcpu, old_msr); 175 kvmppc_mmu_msr_notify(vcpu, old_msr);
151 kvmppc_vcpu_sync_spe(vcpu); 176 kvmppc_vcpu_sync_spe(vcpu);
152 kvmppc_vcpu_sync_fpu(vcpu); 177 kvmppc_vcpu_sync_fpu(vcpu);
178 kvmppc_vcpu_sync_debug(vcpu);
153} 179}
154 180
155static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, 181static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
@@ -655,6 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
655int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 681int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
656{ 682{
657 int ret, s; 683 int ret, s;
684 struct thread_struct thread;
658#ifdef CONFIG_PPC_FPU 685#ifdef CONFIG_PPC_FPU
659 unsigned int fpscr; 686 unsigned int fpscr;
660 int fpexc_mode; 687 int fpexc_mode;
@@ -696,6 +723,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
696 kvmppc_load_guest_fp(vcpu); 723 kvmppc_load_guest_fp(vcpu);
697#endif 724#endif
698 725
726 /* Switch to guest debug context */
727 thread.debug = vcpu->arch.shadow_dbg_reg;
728 switch_booke_debug_regs(&thread);
729 thread.debug = current->thread.debug;
730 current->thread.debug = vcpu->arch.shadow_dbg_reg;
731
699 kvmppc_fix_ee_before_entry(); 732 kvmppc_fix_ee_before_entry();
700 733
701 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 734 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -703,6 +736,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
703 /* No need for kvm_guest_exit. It's done in handle_exit. 736 /* No need for kvm_guest_exit. It's done in handle_exit.
704 We also get here with interrupts enabled. */ 737 We also get here with interrupts enabled. */
705 738
739 /* Switch back to user space debug context */
740 switch_booke_debug_regs(&thread);
741 current->thread.debug = thread.debug;
742
706#ifdef CONFIG_PPC_FPU 743#ifdef CONFIG_PPC_FPU
707 kvmppc_save_guest_fp(vcpu); 744 kvmppc_save_guest_fp(vcpu);
708 745
@@ -758,6 +795,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
758 } 795 }
759} 796}
760 797
798static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
799{
800 struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
801 u32 dbsr = vcpu->arch.dbsr;
802
803 run->debug.arch.status = 0;
804 run->debug.arch.address = vcpu->arch.pc;
805
806 if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
807 run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
808 } else {
809 if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
810 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
811 else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
812 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
813 if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
814 run->debug.arch.address = dbg_reg->dac1;
815 else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
816 run->debug.arch.address = dbg_reg->dac2;
817 }
818
819 return RESUME_HOST;
820}
821
761static void kvmppc_fill_pt_regs(struct pt_regs *regs) 822static void kvmppc_fill_pt_regs(struct pt_regs *regs)
762{ 823{
763 ulong r1, ip, msr, lr; 824 ulong r1, ip, msr, lr;
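
kvmppc_handle_debug above translates DBSR bits into the run->debug.arch fields and hands the event to userspace as KVM_EXIT_DEBUG. On the other side of the ABI, a debugger's run loop might consume it roughly like this (a sketch, not code from this series; the surrounding KVM_RUN loop is assumed):

    #include <stdio.h>
    #include <linux/kvm.h>

    /* Called when ioctl(vcpu_fd, KVM_RUN, 0) returns with
     * run->exit_reason == KVM_EXIT_DEBUG. */
    static void report_debug_exit(struct kvm_run *run)
    {
	    unsigned long long addr = run->debug.arch.address;
	    __u32 status = run->debug.arch.status;

	    if (status & KVMPPC_DEBUG_BREAKPOINT)
		    printf("breakpoint at 0x%llx\n", addr);
	    else if (status & KVMPPC_DEBUG_WATCH_WRITE)
		    printf("write watchpoint, data address 0x%llx\n", addr);
	    else if (status & KVMPPC_DEBUG_WATCH_READ)
		    printf("read watchpoint, data address 0x%llx\n", addr);
	    else
		    printf("single step at 0x%llx\n", addr);
    }
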
@@ -818,6 +879,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
818 case BOOKE_INTERRUPT_CRITICAL: 879 case BOOKE_INTERRUPT_CRITICAL:
819 unknown_exception(&regs); 880 unknown_exception(&regs);
820 break; 881 break;
882 case BOOKE_INTERRUPT_DEBUG:
883 /* Save DBSR before preemption is enabled */
884 vcpu->arch.dbsr = mfspr(SPRN_DBSR);
885 kvmppc_clear_dbsr();
886 break;
821 } 887 }
822} 888}
823 889
@@ -1135,18 +1201,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1135 } 1201 }
1136 1202
1137 case BOOKE_INTERRUPT_DEBUG: { 1203 case BOOKE_INTERRUPT_DEBUG: {
1138 u32 dbsr; 1204 r = kvmppc_handle_debug(run, vcpu);
1139 1205 if (r == RESUME_HOST)
1140 vcpu->arch.pc = mfspr(SPRN_CSRR0); 1206 run->exit_reason = KVM_EXIT_DEBUG;
1141
1142 /* clear IAC events in DBSR register */
1143 dbsr = mfspr(SPRN_DBSR);
1144 dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
1145 mtspr(SPRN_DBSR, dbsr);
1146
1147 run->exit_reason = KVM_EXIT_DEBUG;
1148 kvmppc_account_exit(vcpu, DEBUG_EXITS); 1207 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1149 r = RESUME_HOST;
1150 break; 1208 break;
1151 } 1209 }
1152 1210
@@ -1197,7 +1255,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1197 kvmppc_set_msr(vcpu, 0); 1255 kvmppc_set_msr(vcpu, 0);
1198 1256
1199#ifndef CONFIG_KVM_BOOKE_HV 1257#ifndef CONFIG_KVM_BOOKE_HV
1200 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; 1258 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
1201 vcpu->arch.shadow_pid = 1; 1259 vcpu->arch.shadow_pid = 1;
1202 vcpu->arch.shared->msr = 0; 1260 vcpu->arch.shared->msr = 0;
1203#endif 1261#endif
@@ -1359,7 +1417,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1359 return 0; 1417 return 0;
1360} 1418}
1361 1419
1362void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 1420int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1363{ 1421{
1364 sregs->u.e.features |= KVM_SREGS_E_IVOR; 1422 sregs->u.e.features |= KVM_SREGS_E_IVOR;
1365 1423
@@ -1379,6 +1437,7 @@ void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1379 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; 1437 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1380 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; 1438 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1381 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; 1439 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1440 return 0;
1382} 1441}
1383 1442
1384int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 1443int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
@@ -1413,8 +1472,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1413 1472
1414 get_sregs_base(vcpu, sregs); 1473 get_sregs_base(vcpu, sregs);
1415 get_sregs_arch206(vcpu, sregs); 1474 get_sregs_arch206(vcpu, sregs);
1416 kvmppc_core_get_sregs(vcpu, sregs); 1475 return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1417 return 0;
1418} 1476}
1419 1477
1420int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1478int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1433,7 +1491,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1433 if (ret < 0) 1491 if (ret < 0)
1434 return ret; 1492 return ret;
1435 1493
1436 return kvmppc_core_set_sregs(vcpu, sregs); 1494 return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1437} 1495}
1438 1496
1439int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1497int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1441,7 +1499,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1441 int r = 0; 1499 int r = 0;
1442 union kvmppc_one_reg val; 1500 union kvmppc_one_reg val;
1443 int size; 1501 int size;
1444 long int i;
1445 1502
1446 size = one_reg_size(reg->id); 1503 size = one_reg_size(reg->id);
1447 if (size > sizeof(val)) 1504 if (size > sizeof(val))
@@ -1449,16 +1506,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1449 1506
1450 switch (reg->id) { 1507 switch (reg->id) {
1451 case KVM_REG_PPC_IAC1: 1508 case KVM_REG_PPC_IAC1:
1509 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
1510 break;
1452 case KVM_REG_PPC_IAC2: 1511 case KVM_REG_PPC_IAC2:
1512 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
1513 break;
1514#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1453 case KVM_REG_PPC_IAC3: 1515 case KVM_REG_PPC_IAC3:
1516 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
1517 break;
1454 case KVM_REG_PPC_IAC4: 1518 case KVM_REG_PPC_IAC4:
1455 i = reg->id - KVM_REG_PPC_IAC1; 1519 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
1456 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
1457 break; 1520 break;
1521#endif
1458 case KVM_REG_PPC_DAC1: 1522 case KVM_REG_PPC_DAC1:
1523 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
1524 break;
1459 case KVM_REG_PPC_DAC2: 1525 case KVM_REG_PPC_DAC2:
1460 i = reg->id - KVM_REG_PPC_DAC1; 1526 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
1461 val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
1462 break; 1527 break;
1463 case KVM_REG_PPC_EPR: { 1528 case KVM_REG_PPC_EPR: {
1464 u32 epr = get_guest_epr(vcpu); 1529 u32 epr = get_guest_epr(vcpu);
@@ -1477,10 +1542,13 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1477 val = get_reg_val(reg->id, vcpu->arch.tsr); 1542 val = get_reg_val(reg->id, vcpu->arch.tsr);
1478 break; 1543 break;
1479 case KVM_REG_PPC_DEBUG_INST: 1544 case KVM_REG_PPC_DEBUG_INST:
1480 val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV); 1545 val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
1546 break;
1547 case KVM_REG_PPC_VRSAVE:
1548 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1481 break; 1549 break;
1482 default: 1550 default:
1483 r = kvmppc_get_one_reg(vcpu, reg->id, &val); 1551 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
1484 break; 1552 break;
1485 } 1553 }
1486 1554
@@ -1498,7 +1566,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1498 int r = 0; 1566 int r = 0;
1499 union kvmppc_one_reg val; 1567 union kvmppc_one_reg val;
1500 int size; 1568 int size;
1501 long int i;
1502 1569
1503 size = one_reg_size(reg->id); 1570 size = one_reg_size(reg->id);
1504 if (size > sizeof(val)) 1571 if (size > sizeof(val))
@@ -1509,16 +1576,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1509 1576
1510 switch (reg->id) { 1577 switch (reg->id) {
1511 case KVM_REG_PPC_IAC1: 1578 case KVM_REG_PPC_IAC1:
1579 vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
1580 break;
1512 case KVM_REG_PPC_IAC2: 1581 case KVM_REG_PPC_IAC2:
1582 vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
1583 break;
1584#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1513 case KVM_REG_PPC_IAC3: 1585 case KVM_REG_PPC_IAC3:
1586 vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
1587 break;
1514 case KVM_REG_PPC_IAC4: 1588 case KVM_REG_PPC_IAC4:
1515 i = reg->id - KVM_REG_PPC_IAC1; 1589 vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
1516 vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
1517 break; 1590 break;
1591#endif
1518 case KVM_REG_PPC_DAC1: 1592 case KVM_REG_PPC_DAC1:
1593 vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
1594 break;
1519 case KVM_REG_PPC_DAC2: 1595 case KVM_REG_PPC_DAC2:
1520 i = reg->id - KVM_REG_PPC_DAC1; 1596 vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
1521 vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
1522 break; 1597 break;
1523 case KVM_REG_PPC_EPR: { 1598 case KVM_REG_PPC_EPR: {
1524 u32 new_epr = set_reg_val(reg->id, val); 1599 u32 new_epr = set_reg_val(reg->id, val);
@@ -1552,20 +1627,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1552 kvmppc_set_tcr(vcpu, tcr); 1627 kvmppc_set_tcr(vcpu, tcr);
1553 break; 1628 break;
1554 } 1629 }
1630 case KVM_REG_PPC_VRSAVE:
1631 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1632 break;
1555 default: 1633 default:
1556 r = kvmppc_set_one_reg(vcpu, reg->id, &val); 1634 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
1557 break; 1635 break;
1558 } 1636 }
1559 1637
1560 return r; 1638 return r;
1561} 1639}
1562 1640
1563int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1564 struct kvm_guest_debug *dbg)
1565{
1566 return -EINVAL;
1567}
1568
1569int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1641int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1570{ 1642{
1571 return -ENOTSUPP; 1643 return -ENOTSUPP;
@@ -1590,12 +1662,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1590 return -ENOTSUPP; 1662 return -ENOTSUPP;
1591} 1663}
1592 1664
1593void kvmppc_core_free_memslot(struct kvm_memory_slot *free, 1665void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1594 struct kvm_memory_slot *dont) 1666 struct kvm_memory_slot *dont)
1595{ 1667{
1596} 1668}
1597 1669
1598int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, 1670int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1599 unsigned long npages) 1671 unsigned long npages)
1600{ 1672{
1601 return 0; 1673 return 0;
@@ -1671,6 +1743,157 @@ void kvmppc_decrementer_func(unsigned long data)
1671 kvmppc_set_tsr_bits(vcpu, TSR_DIS); 1743 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1672} 1744}
1673 1745
1746static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1747 uint64_t addr, int index)
1748{
1749 switch (index) {
1750 case 0:
1751 dbg_reg->dbcr0 |= DBCR0_IAC1;
1752 dbg_reg->iac1 = addr;
1753 break;
1754 case 1:
1755 dbg_reg->dbcr0 |= DBCR0_IAC2;
1756 dbg_reg->iac2 = addr;
1757 break;
1758#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1759 case 2:
1760 dbg_reg->dbcr0 |= DBCR0_IAC3;
1761 dbg_reg->iac3 = addr;
1762 break;
1763 case 3:
1764 dbg_reg->dbcr0 |= DBCR0_IAC4;
1765 dbg_reg->iac4 = addr;
1766 break;
1767#endif
1768 default:
1769 return -EINVAL;
1770 }
1771
1772 dbg_reg->dbcr0 |= DBCR0_IDM;
1773 return 0;
1774}
1775
1776static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1777 int type, int index)
1778{
1779 switch (index) {
1780 case 0:
1781 if (type & KVMPPC_DEBUG_WATCH_READ)
1782 dbg_reg->dbcr0 |= DBCR0_DAC1R;
1783 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1784 dbg_reg->dbcr0 |= DBCR0_DAC1W;
1785 dbg_reg->dac1 = addr;
1786 break;
1787 case 1:
1788 if (type & KVMPPC_DEBUG_WATCH_READ)
1789 dbg_reg->dbcr0 |= DBCR0_DAC2R;
1790 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1791 dbg_reg->dbcr0 |= DBCR0_DAC2W;
1792 dbg_reg->dac2 = addr;
1793 break;
1794 default:
1795 return -EINVAL;
1796 }
1797
1798 dbg_reg->dbcr0 |= DBCR0_IDM;
1799 return 0;
1800}
1801void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1802{
1803 /* XXX: Add similar MSR protection for BookE-PR */
1804#ifdef CONFIG_KVM_BOOKE_HV
1805 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1806 if (set) {
1807 if (prot_bitmap & MSR_UCLE)
1808 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1809 if (prot_bitmap & MSR_DE)
1810 vcpu->arch.shadow_msrp |= MSRP_DEP;
1811 if (prot_bitmap & MSR_PMM)
1812 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1813 } else {
1814 if (prot_bitmap & MSR_UCLE)
1815 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1816 if (prot_bitmap & MSR_DE)
1817 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1818 if (prot_bitmap & MSR_PMM)
1819 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1820 }
1821#endif
1822}
1823
1824int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1825 struct kvm_guest_debug *dbg)
1826{
1827 struct debug_reg *dbg_reg;
1828 int n, b = 0, w = 0;
1829
1830 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
1831 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1832 vcpu->guest_debug = 0;
1833 kvm_guest_protect_msr(vcpu, MSR_DE, false);
1834 return 0;
1835 }
1836
1837 kvm_guest_protect_msr(vcpu, MSR_DE, true);
1838 vcpu->guest_debug = dbg->control;
1839 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1840 /* Set DBCR0_EDM in guest visible DBCR0 register. */
1841 vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
1842
1843 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1844 vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1845
1846 /* Code below handles only HW breakpoints */
1847 dbg_reg = &(vcpu->arch.shadow_dbg_reg);
1848
1849#ifdef CONFIG_KVM_BOOKE_HV
1850 /*
 1851	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
 1852	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
1853 */
1854 dbg_reg->dbcr1 = 0;
1855 dbg_reg->dbcr2 = 0;
1856#else
1857 /*
1858 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
1859 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
1860 * is set.
1861 */
1862 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
1863 DBCR1_IAC4US;
1864 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
1865#endif
1866
1867 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1868 return 0;
1869
1870 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
1871 uint64_t addr = dbg->arch.bp[n].addr;
1872 uint32_t type = dbg->arch.bp[n].type;
1873
1874 if (type == KVMPPC_DEBUG_NONE)
1875 continue;
1876
 1877		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
1878 KVMPPC_DEBUG_WATCH_WRITE |
1879 KVMPPC_DEBUG_BREAKPOINT))
1880 return -EINVAL;
1881
1882 if (type & KVMPPC_DEBUG_BREAKPOINT) {
1883 /* Setting H/W breakpoint */
1884 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
1885 return -EINVAL;
1886 } else {
1887 /* Setting H/W watchpoint */
1888 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
1889 type, w++))
1890 return -EINVAL;
1891 }
1892 }
1893
1894 return 0;
1895}
1896
1674void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1897void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1675{ 1898{
1676 vcpu->cpu = smp_processor_id(); 1899 vcpu->cpu = smp_processor_id();
@@ -1681,6 +1904,44 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
1681{ 1904{
1682 current->thread.kvm_vcpu = NULL; 1905 current->thread.kvm_vcpu = NULL;
1683 vcpu->cpu = -1; 1906 vcpu->cpu = -1;
1907
1908 /* Clear pending debug event in DBSR */
1909 kvmppc_clear_dbsr();
1910}
1911
1912void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
1913{
1914 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
1915}
1916
1917int kvmppc_core_init_vm(struct kvm *kvm)
1918{
1919 return kvm->arch.kvm_ops->init_vm(kvm);
1920}
1921
1922struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1923{
1924 return kvm->arch.kvm_ops->vcpu_create(kvm, id);
1925}
1926
1927void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
1928{
1929 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
1930}
1931
1932void kvmppc_core_destroy_vm(struct kvm *kvm)
1933{
1934 kvm->arch.kvm_ops->destroy_vm(kvm);
1935}
1936
1937void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1938{
1939 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
1940}
1941
1942void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
1943{
1944 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
1684} 1945}
1685 1946
1686int __init kvmppc_booke_init(void) 1947int __init kvmppc_booke_init(void)
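
With the -EINVAL stub gone (see the deletion earlier in this file) and the full kvm_arch_vcpu_ioctl_set_guest_debug in place, userspace can now arm the hardware debug facilities. A minimal caller, sketched against the uapi layout this series uses (vcpu_fd is assumed, error handling trimmed):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int arm_hw_breakpoint(int vcpu_fd, __u64 addr)
    {
	    struct kvm_guest_debug dbg;

	    memset(&dbg, 0, sizeof(dbg));
	    dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	    /* First slot: an instruction breakpoint; further slots can
	     * hold more breakpoints or read/write watchpoints. */
	    dbg.arch.bp[0].addr = addr;
	    dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

	    return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }

Calling in again without KVM_GUESTDBG_ENABLE takes the early-return path above, which tears the shadow debug state down and drops the MSR_DE protection.
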
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 5fd1ba693579..09bfd9bc7cf8 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -99,6 +99,30 @@ enum int_class {
99 99
100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); 100void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
101 101
102extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
103extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
104 unsigned int inst, int *advance);
105extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
106 ulong spr_val);
107extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
108 ulong *spr_val);
109extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
110extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
111 struct kvm_vcpu *vcpu,
112 unsigned int inst, int *advance);
113extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
114 ulong spr_val);
115extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
116 ulong *spr_val);
125
102/* 126/*
103 * Load up guest vcpu FP state if it's needed. 127 * Load up guest vcpu FP state if it's needed.
104 * It also sets the MSR_FP in the thread so that the host knows 128 * It also sets the MSR_FP in the thread so that the host knows
@@ -129,4 +153,9 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
129 giveup_fpu(current); 153 giveup_fpu(current);
130#endif 154#endif
131} 155}
156
157static inline void kvmppc_clear_dbsr(void)
158{
159 mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
160}
132#endif /* __KVM_BOOKE_H__ */ 161#endif /* __KVM_BOOKE_H__ */
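
kvmppc_clear_dbsr above leans on DBSR being a write-one-to-clear status register, so the read-then-write-back sequence is not the no-op it first appears to be:

    static inline void ack_pending_debug_events(void)
    {
	    /* Write-1-to-clear: only the bits that were set at the
	     * mfspr are acknowledged by the mtspr. An event that
	     * arrives between the read and the write keeps its DBSR
	     * bit, so no debug event is lost by this sequence. */
	    unsigned long dbsr = mfspr(SPRN_DBSR);

	    mtspr(SPRN_DBSR, dbsr);
    }
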
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ce6b73c29612..497b142f651c 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -305,7 +305,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
305{ 305{
306} 306}
307 307
308void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 308static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
309{ 309{
310 kvmppc_booke_vcpu_load(vcpu, cpu); 310 kvmppc_booke_vcpu_load(vcpu, cpu);
311 311
@@ -313,7 +313,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
313 kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); 313 kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
314} 314}
315 315
316void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 316static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
317{ 317{
318#ifdef CONFIG_SPE 318#ifdef CONFIG_SPE
319 if (vcpu->arch.shadow_msr & MSR_SPE) 319 if (vcpu->arch.shadow_msr & MSR_SPE)
@@ -367,7 +367,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
367 return 0; 367 return 0;
368} 368}
369 369
370void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 370static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
371 struct kvm_sregs *sregs)
371{ 372{
372 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 373 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
373 374
@@ -388,9 +389,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
388 389
389 kvmppc_get_sregs_ivor(vcpu, sregs); 390 kvmppc_get_sregs_ivor(vcpu, sregs);
390 kvmppc_get_sregs_e500_tlb(vcpu, sregs); 391 kvmppc_get_sregs_e500_tlb(vcpu, sregs);
392 return 0;
391} 393}
392 394
393int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 395static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
396 struct kvm_sregs *sregs)
394{ 397{
395 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 398 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
396 int ret; 399 int ret;
@@ -425,21 +428,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
425 return kvmppc_set_sregs_ivor(vcpu, sregs); 428 return kvmppc_set_sregs_ivor(vcpu, sregs);
426} 429}
427 430
428int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, 431static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
429 union kvmppc_one_reg *val) 432 union kvmppc_one_reg *val)
430{ 433{
431 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 434 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
432 return r; 435 return r;
433} 436}
434 437
435int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, 438static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
436 union kvmppc_one_reg *val) 439 union kvmppc_one_reg *val)
437{ 440{
438 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 441 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
439 return r; 442 return r;
440} 443}
441 444
442struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) 445static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
446 unsigned int id)
443{ 447{
444 struct kvmppc_vcpu_e500 *vcpu_e500; 448 struct kvmppc_vcpu_e500 *vcpu_e500;
445 struct kvm_vcpu *vcpu; 449 struct kvm_vcpu *vcpu;
@@ -481,7 +485,7 @@ out:
481 return ERR_PTR(err); 485 return ERR_PTR(err);
482} 486}
483 487
484void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 488static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
485{ 489{
486 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 490 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
487 491
@@ -492,15 +496,32 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
492 kmem_cache_free(kvm_vcpu_cache, vcpu_e500); 496 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
493} 497}
494 498
495int kvmppc_core_init_vm(struct kvm *kvm) 499static int kvmppc_core_init_vm_e500(struct kvm *kvm)
496{ 500{
497 return 0; 501 return 0;
498} 502}
499 503
500void kvmppc_core_destroy_vm(struct kvm *kvm) 504static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
501{ 505{
502} 506}
503 507
508static struct kvmppc_ops kvm_ops_e500 = {
509 .get_sregs = kvmppc_core_get_sregs_e500,
510 .set_sregs = kvmppc_core_set_sregs_e500,
511 .get_one_reg = kvmppc_get_one_reg_e500,
512 .set_one_reg = kvmppc_set_one_reg_e500,
513 .vcpu_load = kvmppc_core_vcpu_load_e500,
514 .vcpu_put = kvmppc_core_vcpu_put_e500,
515 .vcpu_create = kvmppc_core_vcpu_create_e500,
516 .vcpu_free = kvmppc_core_vcpu_free_e500,
517 .mmu_destroy = kvmppc_mmu_destroy_e500,
518 .init_vm = kvmppc_core_init_vm_e500,
519 .destroy_vm = kvmppc_core_destroy_vm_e500,
520 .emulate_op = kvmppc_core_emulate_op_e500,
521 .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
522 .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
523};
524
504static int __init kvmppc_e500_init(void) 525static int __init kvmppc_e500_init(void)
505{ 526{
506 int r, i; 527 int r, i;
@@ -512,11 +533,11 @@ static int __init kvmppc_e500_init(void)
512 533
513 r = kvmppc_core_check_processor_compat(); 534 r = kvmppc_core_check_processor_compat();
514 if (r) 535 if (r)
515 return r; 536 goto err_out;
516 537
517 r = kvmppc_booke_init(); 538 r = kvmppc_booke_init();
518 if (r) 539 if (r)
519 return r; 540 goto err_out;
520 541
521 /* copy extra E500 exception handlers */ 542 /* copy extra E500 exception handlers */
522 ivor[0] = mfspr(SPRN_IVOR32); 543 ivor[0] = mfspr(SPRN_IVOR32);
@@ -534,11 +555,19 @@ static int __init kvmppc_e500_init(void)
534 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + 555 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
535 ivor[max_ivor] + handler_len); 556 ivor[max_ivor] + handler_len);
536 557
537 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); 558 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
559 if (r)
560 goto err_out;
561 kvm_ops_e500.owner = THIS_MODULE;
562 kvmppc_pr_ops = &kvm_ops_e500;
563
564err_out:
565 return r;
538} 566}
539 567
540static void __exit kvmppc_e500_exit(void) 568static void __exit kvmppc_e500_exit(void)
541{ 569{
570 kvmppc_pr_ops = NULL;
542 kvmppc_booke_exit(); 571 kvmppc_booke_exit();
543} 572}
544 573
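
kvm_ops_e500 above, together with the kvmppc_pr_ops assignment in kvmppc_e500_init, is the whole registration story: the generic code only ever calls through the pointer. A stripped-down model of the pattern (names and types here are illustrative, not the kernel's):

    #include <errno.h>

    struct backend_ops {
	    int  (*init_vm)(void *vm);
	    void (*destroy_vm)(void *vm);
    };

    static struct backend_ops *active_ops;	/* set by whichever module loads */

    static int backend_register(struct backend_ops *ops)
    {
	    if (active_ops)
		    return -EBUSY;	/* only one backend may claim the slot */
	    active_ops = ops;
	    return 0;
    }

    static int generic_init_vm(void *vm)
    {
	    /* Core code stays backend-agnostic: one indirect call */
	    return active_ops ? active_ops->init_vm(vm) : -ENODEV;
    }
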
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c2e5e98453a6..4fd9650eb018 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -117,7 +117,7 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
117#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) 117#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
118#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) 118#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
119#define MAS2_ATTRIB_MASK \ 119#define MAS2_ATTRIB_MASK \
120 (MAS2_X0 | MAS2_X1) 120 (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
121#define MAS3_ATTRIB_MASK \ 121#define MAS3_ATTRIB_MASK \
122 (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ 122 (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
123 | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) 123 | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index b10a01243abd..89b7f821f6c4 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -26,6 +26,7 @@
26#define XOP_TLBRE 946 26#define XOP_TLBRE 946
27#define XOP_TLBWE 978 27#define XOP_TLBWE 978
28#define XOP_TLBILX 18 28#define XOP_TLBILX 18
29#define XOP_EHPRIV 270
29 30
30#ifdef CONFIG_KVM_E500MC 31#ifdef CONFIG_KVM_E500MC
31static int dbell2prio(ulong param) 32static int dbell2prio(ulong param)
@@ -82,8 +83,28 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
82} 83}
83#endif 84#endif
84 85
85int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 86static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
86 unsigned int inst, int *advance) 87 unsigned int inst, int *advance)
88{
89 int emulated = EMULATE_DONE;
90
91 switch (get_oc(inst)) {
92 case EHPRIV_OC_DEBUG:
93 run->exit_reason = KVM_EXIT_DEBUG;
94 run->debug.arch.address = vcpu->arch.pc;
95 run->debug.arch.status = 0;
96 kvmppc_account_exit(vcpu, DEBUG_EXITS);
97 emulated = EMULATE_EXIT_USER;
98 *advance = 0;
99 break;
100 default:
101 emulated = EMULATE_FAIL;
102 }
103 return emulated;
104}
105
106int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
107 unsigned int inst, int *advance)
87{ 108{
88 int emulated = EMULATE_DONE; 109 int emulated = EMULATE_DONE;
89 int ra = get_ra(inst); 110 int ra = get_ra(inst);
@@ -130,6 +151,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
130 emulated = kvmppc_e500_emul_tlbivax(vcpu, ea); 151 emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
131 break; 152 break;
132 153
154 case XOP_EHPRIV:
155 emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
156 advance);
157 break;
158
133 default: 159 default:
134 emulated = EMULATE_FAIL; 160 emulated = EMULATE_FAIL;
135 } 161 }
@@ -146,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
146 return emulated; 172 return emulated;
147} 173}
148 174
149int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) 175int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
150{ 176{
151 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 177 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
152 int emulated = EMULATE_DONE; 178 int emulated = EMULATE_DONE;
@@ -237,7 +263,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
237 return emulated; 263 return emulated;
238} 264}
239 265
240int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) 266int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
241{ 267{
242 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 268 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
243 int emulated = EMULATE_DONE; 269 int emulated = EMULATE_DONE;
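
The XOP_EHPRIV case above is what makes KVM_REG_PPC_DEBUG_INST (returned as KVMPPC_INST_EHPRIV_DEBUG in booke.c) useful: a debugger plants that opcode in guest memory, and executing it produces a KVM_EXIT_DEBUG. A hedged sketch of the userspace half; guest_mem, the gpa mapping, and error paths are assumed:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int plant_sw_breakpoint(int vcpu_fd, uint8_t *guest_mem,
				   uint64_t gpa, uint32_t *saved_insn)
    {
	    uint32_t trap;
	    struct kvm_one_reg reg = {
		    .id   = KVM_REG_PPC_DEBUG_INST,
		    .addr = (uintptr_t)&trap,
	    };

	    /* Ask the kernel which opcode acts as the debug trap */
	    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		    return -1;

	    memcpy(saved_insn, guest_mem + gpa, 4);	/* keep the original */
	    memcpy(guest_mem + gpa, &trap, 4);		/* plant the trap */
	    return 0;
    }
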
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 6d6f153b6c1d..ebca6b88ea5e 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -32,7 +32,7 @@
32#include <asm/kvm_ppc.h> 32#include <asm/kvm_ppc.h>
33 33
34#include "e500.h" 34#include "e500.h"
35#include "trace.h" 35#include "trace_booke.h"
36#include "timing.h" 36#include "timing.h"
37#include "e500_mmu_host.h" 37#include "e500_mmu_host.h"
38 38
@@ -536,7 +536,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
536 return get_tlb_raddr(gtlbe) | (eaddr & pgmask); 536 return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
537} 537}
538 538
539void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 539void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
540{ 540{
541} 541}
542 542
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index c65593abae8e..ecf2247b13be 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -32,10 +32,11 @@
32#include <asm/kvm_ppc.h> 32#include <asm/kvm_ppc.h>
33 33
34#include "e500.h" 34#include "e500.h"
35#include "trace.h"
36#include "timing.h" 35#include "timing.h"
37#include "e500_mmu_host.h" 36#include "e500_mmu_host.h"
38 37
38#include "trace_booke.h"
39
39#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) 40#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
40 41
41static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; 42static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
@@ -253,6 +254,9 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
253 ref->pfn = pfn; 254 ref->pfn = pfn;
254 ref->flags |= E500_TLB_VALID; 255 ref->flags |= E500_TLB_VALID;
255 256
257 /* Mark the page accessed */
258 kvm_set_pfn_accessed(pfn);
259
256 if (tlbe_is_writable(gtlbe)) 260 if (tlbe_is_writable(gtlbe))
257 kvm_set_pfn_dirty(pfn); 261 kvm_set_pfn_dirty(pfn);
258} 262}
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19c8379575f7..4132cd2fc171 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
110 110
111static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); 111static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
112 112
113void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 113static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
114{ 114{
115 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 115 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
116 116
@@ -147,7 +147,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
147 kvmppc_load_guest_fp(vcpu); 147 kvmppc_load_guest_fp(vcpu);
148} 148}
149 149
150void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 150static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
151{ 151{
152 vcpu->arch.eplc = mfspr(SPRN_EPLC); 152 vcpu->arch.eplc = mfspr(SPRN_EPLC);
153 vcpu->arch.epsc = mfspr(SPRN_EPSC); 153 vcpu->arch.epsc = mfspr(SPRN_EPSC);
@@ -204,7 +204,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
204 return 0; 204 return 0;
205} 205}
206 206
207void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 207static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
208 struct kvm_sregs *sregs)
208{ 209{
209 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 210 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
210 211
@@ -224,10 +225,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
224 sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; 225 sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
225 sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; 226 sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
226 227
227 kvmppc_get_sregs_ivor(vcpu, sregs); 228 return kvmppc_get_sregs_ivor(vcpu, sregs);
228} 229}
229 230
230int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 231static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
232 struct kvm_sregs *sregs)
231{ 233{
232 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 234 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
233 int ret; 235 int ret;
@@ -260,21 +262,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
260 return kvmppc_set_sregs_ivor(vcpu, sregs); 262 return kvmppc_set_sregs_ivor(vcpu, sregs);
261} 263}
262 264
263int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, 265static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
264 union kvmppc_one_reg *val) 266 union kvmppc_one_reg *val)
265{ 267{
266 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); 268 int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
267 return r; 269 return r;
268} 270}
269 271
270int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, 272static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
271 union kvmppc_one_reg *val) 273 union kvmppc_one_reg *val)
272{ 274{
273 int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); 275 int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
274 return r; 276 return r;
275} 277}
276 278
277struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) 279static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
280 unsigned int id)
278{ 281{
279 struct kvmppc_vcpu_e500 *vcpu_e500; 282 struct kvmppc_vcpu_e500 *vcpu_e500;
280 struct kvm_vcpu *vcpu; 283 struct kvm_vcpu *vcpu;
@@ -315,7 +318,7 @@ out:
315 return ERR_PTR(err); 318 return ERR_PTR(err);
316} 319}
317 320
318void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 321static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
319{ 322{
320 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 323 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
321 324
@@ -325,7 +328,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
325 kmem_cache_free(kvm_vcpu_cache, vcpu_e500); 328 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
326} 329}
327 330
328int kvmppc_core_init_vm(struct kvm *kvm) 331static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
329{ 332{
330 int lpid; 333 int lpid;
331 334
@@ -337,27 +340,52 @@ int kvmppc_core_init_vm(struct kvm *kvm)
337 return 0; 340 return 0;
338} 341}
339 342
340void kvmppc_core_destroy_vm(struct kvm *kvm) 343static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
341{ 344{
342 kvmppc_free_lpid(kvm->arch.lpid); 345 kvmppc_free_lpid(kvm->arch.lpid);
343} 346}
344 347
348static struct kvmppc_ops kvm_ops_e500mc = {
349 .get_sregs = kvmppc_core_get_sregs_e500mc,
350 .set_sregs = kvmppc_core_set_sregs_e500mc,
351 .get_one_reg = kvmppc_get_one_reg_e500mc,
352 .set_one_reg = kvmppc_set_one_reg_e500mc,
353 .vcpu_load = kvmppc_core_vcpu_load_e500mc,
354 .vcpu_put = kvmppc_core_vcpu_put_e500mc,
355 .vcpu_create = kvmppc_core_vcpu_create_e500mc,
356 .vcpu_free = kvmppc_core_vcpu_free_e500mc,
357 .mmu_destroy = kvmppc_mmu_destroy_e500,
358 .init_vm = kvmppc_core_init_vm_e500mc,
359 .destroy_vm = kvmppc_core_destroy_vm_e500mc,
360 .emulate_op = kvmppc_core_emulate_op_e500,
361 .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
362 .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
363};
364
345static int __init kvmppc_e500mc_init(void) 365static int __init kvmppc_e500mc_init(void)
346{ 366{
347 int r; 367 int r;
348 368
349 r = kvmppc_booke_init(); 369 r = kvmppc_booke_init();
350 if (r) 370 if (r)
351 return r; 371 goto err_out;
352 372
353 kvmppc_init_lpid(64); 373 kvmppc_init_lpid(64);
354 kvmppc_claim_lpid(0); /* host */ 374 kvmppc_claim_lpid(0); /* host */
355 375
356 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); 376 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
377 if (r)
378 goto err_out;
379 kvm_ops_e500mc.owner = THIS_MODULE;
380 kvmppc_pr_ops = &kvm_ops_e500mc;
381
382err_out:
383 return r;
357} 384}
358 385
359static void __exit kvmppc_e500mc_exit(void) 386static void __exit kvmppc_e500mc_exit(void)
360{ 387{
388 kvmppc_pr_ops = NULL;
361 kvmppc_booke_exit(); 389 kvmppc_booke_exit();
362} 390}
363 391
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 751cd45f65a0..2f9a0873b44f 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
130 case SPRN_PIR: break; 130 case SPRN_PIR: break;
131 131
132 default: 132 default:
133 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, 133 emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
134 spr_val); 134 spr_val);
135 if (emulated == EMULATE_FAIL) 135 if (emulated == EMULATE_FAIL)
136 printk(KERN_INFO "mtspr: unknown spr " 136 printk(KERN_INFO "mtspr: unknown spr "
137 "0x%x\n", sprn); 137 "0x%x\n", sprn);
@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
191 spr_val = kvmppc_get_dec(vcpu, get_tb()); 191 spr_val = kvmppc_get_dec(vcpu, get_tb());
192 break; 192 break;
193 default: 193 default:
194 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, 194 emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
195 &spr_val); 195 &spr_val);
196 if (unlikely(emulated == EMULATE_FAIL)) { 196 if (unlikely(emulated == EMULATE_FAIL)) {
197 printk(KERN_INFO "mfspr: unknown spr " 197 printk(KERN_INFO "mfspr: unknown spr "
198 "0x%x\n", sprn); 198 "0x%x\n", sprn);
@@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
464 } 464 }
465 465
466 if (emulated == EMULATE_FAIL) { 466 if (emulated == EMULATE_FAIL) {
467 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); 467 emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
468 &advance);
468 if (emulated == EMULATE_AGAIN) { 469 if (emulated == EMULATE_AGAIN) {
469 advance = 0; 470 advance = 0;
470 } else if (emulated == EMULATE_FAIL) { 471 } else if (emulated == EMULATE_FAIL) {
@@ -483,3 +484,4 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
483 484
484 return emulated; 485 return emulated;
485} 486}
487EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 07c0106fab76..9ae97686e9f4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -26,6 +26,7 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/file.h> 28#include <linux/file.h>
29#include <linux/module.h>
29#include <asm/cputable.h> 30#include <asm/cputable.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/kvm_ppc.h> 32#include <asm/kvm_ppc.h>
@@ -39,6 +40,12 @@
39#define CREATE_TRACE_POINTS 40#define CREATE_TRACE_POINTS
40#include "trace.h" 41#include "trace.h"
41 42
43struct kvmppc_ops *kvmppc_hv_ops;
44EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
45struct kvmppc_ops *kvmppc_pr_ops;
46EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
47
48
42int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 49int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
43{ 50{
44 return !!(v->arch.pending_exceptions) || 51 return !!(v->arch.pending_exceptions) ||
@@ -50,7 +57,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
50 return 1; 57 return 1;
51} 58}
52 59
53#ifndef CONFIG_KVM_BOOK3S_64_HV
54/* 60/*
55 * Common checks before entering the guest world. Call with interrupts 61 * Common checks before entering the guest world. Call with interrupts
56 * disabled. 62 * disabled.
@@ -125,7 +131,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
125 131
126 return r; 132 return r;
127} 133}
128#endif /* CONFIG_KVM_BOOK3S_64_HV */ 134EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
129 135
130int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 136int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
131{ 137{
@@ -179,6 +185,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
179 185
180 return r; 186 return r;
181} 187}
188EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
182 189
183int kvmppc_sanity_check(struct kvm_vcpu *vcpu) 190int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
184{ 191{
@@ -192,11 +199,9 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
192 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) 199 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
193 goto out; 200 goto out;
194 201
195#ifdef CONFIG_KVM_BOOK3S_64_HV
196 /* HV KVM can only do PAPR mode for now */ 202 /* HV KVM can only do PAPR mode for now */
197 if (!vcpu->arch.papr_enabled) 203 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
198 goto out; 204 goto out;
199#endif
200 205
201#ifdef CONFIG_KVM_BOOKE_HV 206#ifdef CONFIG_KVM_BOOKE_HV
202 if (!cpu_has_feature(CPU_FTR_EMB_HV)) 207 if (!cpu_has_feature(CPU_FTR_EMB_HV))
@@ -209,6 +214,7 @@ out:
209 vcpu->arch.sane = r; 214 vcpu->arch.sane = r;
210 return r ? 0 : -EINVAL; 215 return r ? 0 : -EINVAL;
211} 216}
217EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
212 218
213int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) 219int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
214{ 220{
@@ -243,6 +249,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
243 249
244 return r; 250 return r;
245} 251}
252EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
246 253
247int kvm_arch_hardware_enable(void *garbage) 254int kvm_arch_hardware_enable(void *garbage)
248{ 255{
@@ -269,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn)
269 276
270int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 277int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
271{ 278{
272 if (type) 279 struct kvmppc_ops *kvm_ops = NULL;
273 return -EINVAL; 280 /*
 274 281 * If we have both HV and PR enabled, the default is HV
282 */
283 if (type == 0) {
284 if (kvmppc_hv_ops)
285 kvm_ops = kvmppc_hv_ops;
286 else
287 kvm_ops = kvmppc_pr_ops;
288 if (!kvm_ops)
289 goto err_out;
290 } else if (type == KVM_VM_PPC_HV) {
291 if (!kvmppc_hv_ops)
292 goto err_out;
293 kvm_ops = kvmppc_hv_ops;
294 } else if (type == KVM_VM_PPC_PR) {
295 if (!kvmppc_pr_ops)
296 goto err_out;
297 kvm_ops = kvmppc_pr_ops;
298 } else
299 goto err_out;
300
301 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
302 return -ENOENT;
303
304 kvm->arch.kvm_ops = kvm_ops;
275 return kvmppc_core_init_vm(kvm); 305 return kvmppc_core_init_vm(kvm);
306err_out:
307 return -EINVAL;
276} 308}
277 309
278void kvm_arch_destroy_vm(struct kvm *kvm) 310void kvm_arch_destroy_vm(struct kvm *kvm)
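
kvm_arch_init_vm now keys off the type argument of KVM_CREATE_VM: 0 picks HV when both backends are loaded, while KVM_VM_PPC_HV and KVM_VM_PPC_PR force one backend and fail if it is absent. From userspace the selection looks like this (a sketch, error handling trimmed):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int create_vm(int force_pr)
    {
	    int kvm_fd = open("/dev/kvm", O_RDWR);

	    if (kvm_fd < 0)
		    return -1;

	    /* Returns a VM fd; with type 0 the preference above
	     * (HV over PR) decides which backend runs the VM. */
	    return ioctl(kvm_fd, KVM_CREATE_VM,
			 force_pr ? KVM_VM_PPC_PR : 0);
    }
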
@@ -292,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
292 kvmppc_core_destroy_vm(kvm); 324 kvmppc_core_destroy_vm(kvm);
293 325
294 mutex_unlock(&kvm->lock); 326 mutex_unlock(&kvm->lock);
327
328 /* drop the module reference */
329 module_put(kvm->arch.kvm_ops->owner);
295} 330}
296 331
297void kvm_arch_sync_events(struct kvm *kvm) 332void kvm_arch_sync_events(struct kvm *kvm)
@@ -301,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm)
301int kvm_dev_ioctl_check_extension(long ext) 336int kvm_dev_ioctl_check_extension(long ext)
302{ 337{
303 int r; 338 int r;
339 /* FIXME!!
 340	 * Should some of this be a vm ioctl? Is it possible now?
341 */
342 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
304 343
305 switch (ext) { 344 switch (ext) {
306#ifdef CONFIG_BOOKE 345#ifdef CONFIG_BOOKE
@@ -320,22 +359,26 @@ int kvm_dev_ioctl_check_extension(long ext)
320 case KVM_CAP_DEVICE_CTRL: 359 case KVM_CAP_DEVICE_CTRL:
321 r = 1; 360 r = 1;
322 break; 361 break;
323#ifndef CONFIG_KVM_BOOK3S_64_HV
324 case KVM_CAP_PPC_PAIRED_SINGLES: 362 case KVM_CAP_PPC_PAIRED_SINGLES:
325 case KVM_CAP_PPC_OSI: 363 case KVM_CAP_PPC_OSI:
326 case KVM_CAP_PPC_GET_PVINFO: 364 case KVM_CAP_PPC_GET_PVINFO:
327#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 365#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
328 case KVM_CAP_SW_TLB: 366 case KVM_CAP_SW_TLB:
329#endif 367#endif
330#ifdef CONFIG_KVM_MPIC 368 /* We support this only for PR */
331 case KVM_CAP_IRQ_MPIC: 369 r = !hv_enabled;
332#endif
333 r = 1;
334 break; 370 break;
371#ifdef CONFIG_KVM_MMIO
335 case KVM_CAP_COALESCED_MMIO: 372 case KVM_CAP_COALESCED_MMIO:
336 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 373 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
337 break; 374 break;
338#endif 375#endif
376#ifdef CONFIG_KVM_MPIC
377 case KVM_CAP_IRQ_MPIC:
378 r = 1;
379 break;
380#endif
381
339#ifdef CONFIG_PPC_BOOK3S_64 382#ifdef CONFIG_PPC_BOOK3S_64
340 case KVM_CAP_SPAPR_TCE: 383 case KVM_CAP_SPAPR_TCE:
341 case KVM_CAP_PPC_ALLOC_HTAB: 384 case KVM_CAP_PPC_ALLOC_HTAB:
@@ -346,32 +389,37 @@ int kvm_dev_ioctl_check_extension(long ext)
346 r = 1; 389 r = 1;
347 break; 390 break;
348#endif /* CONFIG_PPC_BOOK3S_64 */ 391#endif /* CONFIG_PPC_BOOK3S_64 */
349#ifdef CONFIG_KVM_BOOK3S_64_HV 392#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
350 case KVM_CAP_PPC_SMT: 393 case KVM_CAP_PPC_SMT:
351 r = threads_per_core; 394 if (hv_enabled)
395 r = threads_per_core;
396 else
397 r = 0;
352 break; 398 break;
353 case KVM_CAP_PPC_RMA: 399 case KVM_CAP_PPC_RMA:
354 r = 1; 400 r = hv_enabled;
355 /* PPC970 requires an RMA */ 401 /* PPC970 requires an RMA */
356 if (cpu_has_feature(CPU_FTR_ARCH_201)) 402 if (r && cpu_has_feature(CPU_FTR_ARCH_201))
357 r = 2; 403 r = 2;
358 break; 404 break;
359#endif 405#endif
360 case KVM_CAP_SYNC_MMU: 406 case KVM_CAP_SYNC_MMU:
361#ifdef CONFIG_KVM_BOOK3S_64_HV 407#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
362 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; 408 if (hv_enabled)
409 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
410 else
411 r = 0;
363#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) 412#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
364 r = 1; 413 r = 1;
365#else 414#else
366 r = 0; 415 r = 0;
367 break;
368#endif 416#endif
369#ifdef CONFIG_KVM_BOOK3S_64_HV 417 break;
418#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
370 case KVM_CAP_PPC_HTAB_FD: 419 case KVM_CAP_PPC_HTAB_FD:
371 r = 1; 420 r = hv_enabled;
372 break; 421 break;
373#endif 422#endif
374 break;
375 case KVM_CAP_NR_VCPUS: 423 case KVM_CAP_NR_VCPUS:
376 /* 424 /*
377 * Recommending a number of CPUs is somewhat arbitrary; we 425 * Recommending a number of CPUs is somewhat arbitrary; we
@@ -379,11 +427,10 @@ int kvm_dev_ioctl_check_extension(long ext)
379 * will have secondary threads "offline"), and for other KVM 427 * will have secondary threads "offline"), and for other KVM
380 * implementations just count online CPUs. 428 * implementations just count online CPUs.
381 */ 429 */
382#ifdef CONFIG_KVM_BOOK3S_64_HV 430 if (hv_enabled)
383 r = num_present_cpus(); 431 r = num_present_cpus();
384#else 432 else
385 r = num_online_cpus(); 433 r = num_online_cpus();
386#endif
387 break; 434 break;
388 case KVM_CAP_MAX_VCPUS: 435 case KVM_CAP_MAX_VCPUS:
389 r = KVM_MAX_VCPUS; 436 r = KVM_MAX_VCPUS;
@@ -407,15 +454,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
407 return -EINVAL; 454 return -EINVAL;
408} 455}
409 456
410void kvm_arch_free_memslot(struct kvm_memory_slot *free, 457void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
411 struct kvm_memory_slot *dont) 458 struct kvm_memory_slot *dont)
412{ 459{
413 kvmppc_core_free_memslot(free, dont); 460 kvmppc_core_free_memslot(kvm, free, dont);
414} 461}
415 462
416int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) 463int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
464 unsigned long npages)
417{ 465{
418 return kvmppc_core_create_memslot(slot, npages); 466 return kvmppc_core_create_memslot(kvm, slot, npages);
419} 467}
420 468
421void kvm_arch_memslots_updated(struct kvm *kvm) 469void kvm_arch_memslots_updated(struct kvm *kvm)
@@ -659,6 +707,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
659 707
660 return EMULATE_DO_MMIO; 708 return EMULATE_DO_MMIO;
661} 709}
710EXPORT_SYMBOL_GPL(kvmppc_handle_load);
662 711
663/* Same as above, but sign extends */ 712/* Same as above, but sign extends */
664int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 713int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -720,6 +769,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
720 769
721 return EMULATE_DO_MMIO; 770 return EMULATE_DO_MMIO;
722} 771}
772EXPORT_SYMBOL_GPL(kvmppc_handle_store);
723 773
724int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 774int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
725{ 775{
@@ -1024,52 +1074,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
1024 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); 1074 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
1025 goto out; 1075 goto out;
1026 } 1076 }
1027#endif /* CONFIG_PPC_BOOK3S_64 */
1028
1029#ifdef CONFIG_KVM_BOOK3S_64_HV
1030 case KVM_ALLOCATE_RMA: {
1031 struct kvm_allocate_rma rma;
1032 struct kvm *kvm = filp->private_data;
1033
1034 r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
1035 if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
1036 r = -EFAULT;
1037 break;
1038 }
1039
1040 case KVM_PPC_ALLOCATE_HTAB: {
1041 u32 htab_order;
1042
1043 r = -EFAULT;
1044 if (get_user(htab_order, (u32 __user *)argp))
1045 break;
1046 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
1047 if (r)
1048 break;
1049 r = -EFAULT;
1050 if (put_user(htab_order, (u32 __user *)argp))
1051 break;
1052 r = 0;
1053 break;
1054 }
1055
1056 case KVM_PPC_GET_HTAB_FD: {
1057 struct kvm_get_htab_fd ghf;
1058
1059 r = -EFAULT;
1060 if (copy_from_user(&ghf, argp, sizeof(ghf)))
1061 break;
1062 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
1063 break;
1064 }
1065#endif /* CONFIG_KVM_BOOK3S_64_HV */
1066
1067#ifdef CONFIG_PPC_BOOK3S_64
1068 case KVM_PPC_GET_SMMU_INFO: { 1077 case KVM_PPC_GET_SMMU_INFO: {
1069 struct kvm_ppc_smmu_info info; 1078 struct kvm_ppc_smmu_info info;
1079 struct kvm *kvm = filp->private_data;
1070 1080
1071 memset(&info, 0, sizeof(info)); 1081 memset(&info, 0, sizeof(info));
1072 r = kvm_vm_ioctl_get_smmu_info(kvm, &info); 1082 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
1073 if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) 1083 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
1074 r = -EFAULT; 1084 r = -EFAULT;
1075 break; 1085 break;
@@ -1080,11 +1090,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
 		break;
 	}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+	default: {
+		struct kvm *kvm = filp->private_data;
+		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
+	}
+#else /* CONFIG_PPC_BOOK3S_64 */
 	default:
 		r = -ENOTTY;
+#endif
 	}
-
 out:
 	return r;
 }
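The two hunks above capture the heart of this series: kvm_arch_vm_ioctl() no longer calls a single compile-time-selected implementation but indirects through a per-VM ops table (kvm->arch.kvm_ops), and any ioctl the generic code does not recognise falls through to the backend's arch_vm_ioctl hook. The HV-only cases (KVM_ALLOCATE_RMA, KVM_PPC_ALLOCATE_HTAB, KVM_PPC_GET_HTAB_FD) can therefore be deleted here, presumably relocated into the HV backend's arch_vm_ioctl; this hunk only shows the removal. A minimal sketch of the dispatch pattern, with simplified stand-in types (my_vm, my_vm_ops and their field names are illustrative, not the kernel's exact definitions):

/* Per-VM ops dispatch: the backend is chosen once, at VM creation,
 * and every later call indirects through the table. */
struct my_vm;

struct my_vm_ops {
	int  (*get_smmu_info)(struct my_vm *vm, void *info);
	long (*arch_vm_ioctl)(struct my_vm *vm, unsigned int ioctl,
			      unsigned long arg);
};

struct my_vm {
	const struct my_vm_ops *ops;	/* e.g. PR or HV backend */
};

static long my_vm_ioctl(struct my_vm *vm, unsigned int ioctl,
			unsigned long arg)
{
	/* Mirrors the new "default:" case above: unknown ioctls are
	 * delegated to whichever backend owns this VM. */
	return vm->ops->arch_vm_ioctl(vm, ioctl, arg);
}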
@@ -1106,22 +1120,26 @@ long kvmppc_alloc_lpid(void)
 
 	return lpid;
 }
+EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
 
 void kvmppc_claim_lpid(long lpid)
 {
 	set_bit(lpid, lpid_inuse);
 }
+EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
 
 void kvmppc_free_lpid(long lpid)
 {
 	clear_bit(lpid, lpid_inuse);
 }
+EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
 
 void kvmppc_init_lpid(unsigned long nr_lpids_param)
 {
 	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
 	memset(lpid_inuse, 0, sizeof(lpid_inuse));
 }
+EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
 
 int kvm_arch_init(void *opaque)
 {
@@ -1130,4 +1148,5 @@ int kvm_arch_init(void *opaque)
 
 void kvm_arch_exit(void)
 {
+
 }
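Every EXPORT_SYMBOL_GPL() added in this file follows from the same module split: helpers that used to be called only from code linked into kvm.ko must now also be visible to the separately loadable PR/HV backend modules. A sketch of the mechanism (kvmppc_demo_helper is a made-up name; the exported functions in the hunks above are the real ones):

#include <linux/errno.h>
#include <linux/module.h>

/* In the always-present core module (kvm.ko): define and export. */
int kvmppc_demo_helper(long lpid)		/* hypothetical */
{
	return lpid >= 0 ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_demo_helper);

/* A separately built backend (e.g. kvm-pr.ko or kvm-hv.ko) just
 * declares and calls it; the module loader resolves the symbol at
 * load time, and only for GPL-compatible modules. */

Without the exports, loading a backend module would fail with unresolved-symbol errors even though the functions exist in the core.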
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index e326489a5420..2e0e67ef3544 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -31,126 +31,6 @@ TRACE_EVENT(kvm_ppc_instr,
 		  __entry->inst, __entry->pc, __entry->emulate)
 );
 
-#ifdef CONFIG_PPC_BOOK3S
-#define kvm_trace_symbol_exit \
-	{0x100, "SYSTEM_RESET"}, \
-	{0x200, "MACHINE_CHECK"}, \
-	{0x300, "DATA_STORAGE"}, \
-	{0x380, "DATA_SEGMENT"}, \
-	{0x400, "INST_STORAGE"}, \
-	{0x480, "INST_SEGMENT"}, \
-	{0x500, "EXTERNAL"}, \
-	{0x501, "EXTERNAL_LEVEL"}, \
-	{0x502, "EXTERNAL_HV"}, \
-	{0x600, "ALIGNMENT"}, \
-	{0x700, "PROGRAM"}, \
-	{0x800, "FP_UNAVAIL"}, \
-	{0x900, "DECREMENTER"}, \
-	{0x980, "HV_DECREMENTER"}, \
-	{0xc00, "SYSCALL"}, \
-	{0xd00, "TRACE"}, \
-	{0xe00, "H_DATA_STORAGE"}, \
-	{0xe20, "H_INST_STORAGE"}, \
-	{0xe40, "H_EMUL_ASSIST"}, \
-	{0xf00, "PERFMON"}, \
-	{0xf20, "ALTIVEC"}, \
-	{0xf40, "VSX"}
-#else
-#define kvm_trace_symbol_exit \
-	{0, "CRITICAL"}, \
-	{1, "MACHINE_CHECK"}, \
-	{2, "DATA_STORAGE"}, \
-	{3, "INST_STORAGE"}, \
-	{4, "EXTERNAL"}, \
-	{5, "ALIGNMENT"}, \
-	{6, "PROGRAM"}, \
-	{7, "FP_UNAVAIL"}, \
-	{8, "SYSCALL"}, \
-	{9, "AP_UNAVAIL"}, \
-	{10, "DECREMENTER"}, \
-	{11, "FIT"}, \
-	{12, "WATCHDOG"}, \
-	{13, "DTLB_MISS"}, \
-	{14, "ITLB_MISS"}, \
-	{15, "DEBUG"}, \
-	{32, "SPE_UNAVAIL"}, \
-	{33, "SPE_FP_DATA"}, \
-	{34, "SPE_FP_ROUND"}, \
-	{35, "PERFORMANCE_MONITOR"}, \
-	{36, "DOORBELL"}, \
-	{37, "DOORBELL_CRITICAL"}, \
-	{38, "GUEST_DBELL"}, \
-	{39, "GUEST_DBELL_CRIT"}, \
-	{40, "HV_SYSCALL"}, \
-	{41, "HV_PRIV"}
-#endif
-
-TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
-	TP_ARGS(exit_nr, vcpu),
-
-	TP_STRUCT__entry(
-		__field( unsigned int, exit_nr )
-		__field( unsigned long, pc )
-		__field( unsigned long, msr )
-		__field( unsigned long, dar )
-#ifdef CONFIG_KVM_BOOK3S_PR
-		__field( unsigned long, srr1 )
-#endif
-		__field( unsigned long, last_inst )
-	),
-
-	TP_fast_assign(
-#ifdef CONFIG_KVM_BOOK3S_PR
-		struct kvmppc_book3s_shadow_vcpu *svcpu;
-#endif
-		__entry->exit_nr = exit_nr;
-		__entry->pc = kvmppc_get_pc(vcpu);
-		__entry->dar = kvmppc_get_fault_dar(vcpu);
-		__entry->msr = vcpu->arch.shared->msr;
-#ifdef CONFIG_KVM_BOOK3S_PR
-		svcpu = svcpu_get(vcpu);
-		__entry->srr1 = svcpu->shadow_srr1;
-		svcpu_put(svcpu);
-#endif
-		__entry->last_inst = vcpu->arch.last_inst;
-	),
-
-	TP_printk("exit=%s"
-		" | pc=0x%lx"
-		" | msr=0x%lx"
-		" | dar=0x%lx"
-#ifdef CONFIG_KVM_BOOK3S_PR
-		" | srr1=0x%lx"
-#endif
-		" | last_inst=0x%lx"
-		,
-		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
-		__entry->pc,
-		__entry->msr,
-		__entry->dar,
-#ifdef CONFIG_KVM_BOOK3S_PR
-		__entry->srr1,
-#endif
-		__entry->last_inst
-		)
-);
-
-TRACE_EVENT(kvm_unmap_hva,
-	TP_PROTO(unsigned long hva),
-	TP_ARGS(hva),
-
-	TP_STRUCT__entry(
-		__field( unsigned long, hva )
-	),
-
-	TP_fast_assign(
-		__entry->hva = hva;
-	),
-
-	TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
 TRACE_EVENT(kvm_stlb_inval,
 	TP_PROTO(unsigned int stlb_index),
 	TP_ARGS(stlb_index),
@@ -236,315 +116,6 @@ TRACE_EVENT(kvm_check_requests,
 		__entry->cpu_nr, __entry->requests)
 );
 
-
-/*************************************************************************
- *                         Book3S trace points                           *
- *************************************************************************/
-
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-TRACE_EVENT(kvm_book3s_reenter,
-	TP_PROTO(int r, struct kvm_vcpu *vcpu),
-	TP_ARGS(r, vcpu),
-
-	TP_STRUCT__entry(
-		__field( unsigned int, r )
-		__field( unsigned long, pc )
-	),
-
-	TP_fast_assign(
-		__entry->r = r;
-		__entry->pc = kvmppc_get_pc(vcpu);
-	),
-
-	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
-);
-
-#ifdef CONFIG_PPC_BOOK3S_64
-
-TRACE_EVENT(kvm_book3s_64_mmu_map,
-	TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
-		 struct kvmppc_pte *orig_pte),
-	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
-
-	TP_STRUCT__entry(
-		__field( unsigned char, flag_w )
-		__field( unsigned char, flag_x )
-		__field( unsigned long, eaddr )
-		__field( unsigned long, hpteg )
-		__field( unsigned long, va )
-		__field( unsigned long long, vpage )
-		__field( unsigned long, hpaddr )
-	),
-
-	TP_fast_assign(
-		__entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
-		__entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
-		__entry->eaddr = orig_pte->eaddr;
-		__entry->hpteg = hpteg;
-		__entry->va = va;
-		__entry->vpage = orig_pte->vpage;
-		__entry->hpaddr = hpaddr;
-	),
-
-	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
-		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
-		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
-);
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-TRACE_EVENT(kvm_book3s_mmu_map,
-	TP_PROTO(struct hpte_cache *pte),
-	TP_ARGS(pte),
-
-	TP_STRUCT__entry(
-		__field( u64, host_vpn )
-		__field( u64, pfn )
-		__field( ulong, eaddr )
-		__field( u64, vpage )
-		__field( ulong, raddr )
-		__field( int, flags )
-	),
-
-	TP_fast_assign(
-		__entry->host_vpn = pte->host_vpn;
-		__entry->pfn = pte->pfn;
-		__entry->eaddr = pte->pte.eaddr;
-		__entry->vpage = pte->pte.vpage;
-		__entry->raddr = pte->pte.raddr;
-		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
-			(pte->pte.may_write ? 0x2 : 0) |
-			(pte->pte.may_execute ? 0x1 : 0);
-	),
-
-	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
-		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
-		  __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_invalidate,
-	TP_PROTO(struct hpte_cache *pte),
-	TP_ARGS(pte),
-
-	TP_STRUCT__entry(
-		__field( u64, host_vpn )
-		__field( u64, pfn )
-		__field( ulong, eaddr )
-		__field( u64, vpage )
-		__field( ulong, raddr )
-		__field( int, flags )
-	),
-
-	TP_fast_assign(
-		__entry->host_vpn = pte->host_vpn;
-		__entry->pfn = pte->pfn;
-		__entry->eaddr = pte->pte.eaddr;
-		__entry->vpage = pte->pte.vpage;
-		__entry->raddr = pte->pte.raddr;
-		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
-			(pte->pte.may_write ? 0x2 : 0) |
-			(pte->pte.may_execute ? 0x1 : 0);
-	),
-
-	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
-		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
-		  __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_flush,
-	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
-		 unsigned long long p2),
-	TP_ARGS(type, vcpu, p1, p2),
-
-	TP_STRUCT__entry(
-		__field( int, count )
-		__field( unsigned long long, p1 )
-		__field( unsigned long long, p2 )
-		__field( const char *, type )
-	),
-
-	TP_fast_assign(
-		__entry->count = to_book3s(vcpu)->hpte_cache_count;
-		__entry->p1 = p1;
-		__entry->p2 = p2;
-		__entry->type = type;
-	),
-
-	TP_printk("Flush %d %sPTEs: %llx - %llx",
-		  __entry->count, __entry->type, __entry->p1, __entry->p2)
-);
-
-TRACE_EVENT(kvm_book3s_slb_found,
-	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
-	TP_ARGS(gvsid, hvsid),
-
-	TP_STRUCT__entry(
-		__field( unsigned long long, gvsid )
-		__field( unsigned long long, hvsid )
-	),
-
-	TP_fast_assign(
-		__entry->gvsid = gvsid;
-		__entry->hvsid = hvsid;
-	),
-
-	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_fail,
-	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
-	TP_ARGS(sid_map_mask, gvsid),
-
-	TP_STRUCT__entry(
-		__field( unsigned short, sid_map_mask )
-		__field( unsigned long long, gvsid )
-	),
-
-	TP_fast_assign(
-		__entry->sid_map_mask = sid_map_mask;
-		__entry->gvsid = gvsid;
-	),
-
-	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
-		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_map,
-	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
-		 unsigned long long hvsid),
-	TP_ARGS(sid_map_mask, gvsid, hvsid),
-
-	TP_STRUCT__entry(
-		__field( unsigned short, sid_map_mask )
-		__field( unsigned long long, guest_vsid )
-		__field( unsigned long long, host_vsid )
-	),
-
-	TP_fast_assign(
-		__entry->sid_map_mask = sid_map_mask;
-		__entry->guest_vsid = gvsid;
-		__entry->host_vsid = hvsid;
-	),
-
-	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
-		  __entry->guest_vsid, __entry->host_vsid)
-);
-
-TRACE_EVENT(kvm_book3s_slbmte,
-	TP_PROTO(u64 slb_vsid, u64 slb_esid),
-	TP_ARGS(slb_vsid, slb_esid),
-
-	TP_STRUCT__entry(
-		__field( u64, slb_vsid )
-		__field( u64, slb_esid )
-	),
-
-	TP_fast_assign(
-		__entry->slb_vsid = slb_vsid;
-		__entry->slb_esid = slb_esid;
-	),
-
-	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
-);
-
-#endif /* CONFIG_PPC_BOOK3S */
-
-
-/*************************************************************************
- *                         Book3E trace points                           *
- *************************************************************************/
-
-#ifdef CONFIG_BOOKE
-
-TRACE_EVENT(kvm_booke206_stlb_write,
-	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
-	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
-
-	TP_STRUCT__entry(
-		__field( __u32, mas0 )
-		__field( __u32, mas8 )
-		__field( __u32, mas1 )
-		__field( __u64, mas2 )
-		__field( __u64, mas7_3 )
-	),
-
-	TP_fast_assign(
-		__entry->mas0 = mas0;
-		__entry->mas8 = mas8;
-		__entry->mas1 = mas1;
-		__entry->mas2 = mas2;
-		__entry->mas7_3 = mas7_3;
-	),
-
-	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
-		  __entry->mas0, __entry->mas8, __entry->mas1,
-		  __entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_gtlb_write,
-	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
-	TP_ARGS(mas0, mas1, mas2, mas7_3),
-
-	TP_STRUCT__entry(
-		__field( __u32, mas0 )
-		__field( __u32, mas1 )
-		__field( __u64, mas2 )
-		__field( __u64, mas7_3 )
-	),
-
-	TP_fast_assign(
-		__entry->mas0 = mas0;
-		__entry->mas1 = mas1;
-		__entry->mas2 = mas2;
-		__entry->mas7_3 = mas7_3;
-	),
-
-	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
-		  __entry->mas0, __entry->mas1,
-		  __entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_ref_release,
-	TP_PROTO(__u64 pfn, __u32 flags),
-	TP_ARGS(pfn, flags),
-
-	TP_STRUCT__entry(
-		__field( __u64, pfn )
-		__field( __u32, flags )
-	),
-
-	TP_fast_assign(
-		__entry->pfn = pfn;
-		__entry->flags = flags;
-	),
-
-	TP_printk("pfn=%llx flags=%x",
-		  __entry->pfn, __entry->flags)
-);
-
-TRACE_EVENT(kvm_booke_queue_irqprio,
-	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
-	TP_ARGS(vcpu, priority),
-
-	TP_STRUCT__entry(
-		__field( __u32, cpu_nr )
-		__field( __u32, priority )
-		__field( unsigned long, pending )
-	),
-
-	TP_fast_assign(
-		__entry->cpu_nr = vcpu->vcpu_id;
-		__entry->priority = priority;
-		__entry->pending = vcpu->arch.pending_exceptions;
-	),
-
-	TP_printk("vcpu=%x prio=%x pending=%lx",
-		  __entry->cpu_nr, __entry->priority, __entry->pending)
-);
-
-#endif
-
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
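The deletions above strip every flavour-specific event out of the shared trace.h, leaving only the generic ones (kvm_ppc_instr, kvm_stlb_inval, kvm_check_requests, ...). The old header picked one definition of kvm_trace_symbol_exit and kvm_exit with #ifdef at build time; once PR and BookE/HV code can coexist, each flavour needs its own trace system instead, which is what the two new headers below provide. A sketch of the ftrace header convention they follow (the demo names are illustrative):

/* trace_demo.h -- a self-contained tracepoint header */
#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo		/* events/demo/ in tracefs */
#define TRACE_INCLUDE_PATH .		/* header lives next to its .c */
#define TRACE_INCLUDE_FILE trace_demo	/* this file's basename */

TRACE_EVENT(demo_event,
	TP_PROTO(unsigned long val),
	TP_ARGS(val),
	TP_STRUCT__entry(
		__field( unsigned long, val )
	),
	TP_fast_assign(
		__entry->val = val;
	),
	TP_printk("val=0x%lx", __entry->val)
);

#endif /* _TRACE_DEMO_H */

/* This part must be outside protection */
#include <trace/define_trace.h>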
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
new file mode 100644
index 000000000000..f7537cf26ce7
--- /dev/null
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -0,0 +1,177 @@
+#if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_BOOKE_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_booke
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
+#define kvm_trace_symbol_exit \
+	{0, "CRITICAL"}, \
+	{1, "MACHINE_CHECK"}, \
+	{2, "DATA_STORAGE"}, \
+	{3, "INST_STORAGE"}, \
+	{4, "EXTERNAL"}, \
+	{5, "ALIGNMENT"}, \
+	{6, "PROGRAM"}, \
+	{7, "FP_UNAVAIL"}, \
+	{8, "SYSCALL"}, \
+	{9, "AP_UNAVAIL"}, \
+	{10, "DECREMENTER"}, \
+	{11, "FIT"}, \
+	{12, "WATCHDOG"}, \
+	{13, "DTLB_MISS"}, \
+	{14, "ITLB_MISS"}, \
+	{15, "DEBUG"}, \
+	{32, "SPE_UNAVAIL"}, \
+	{33, "SPE_FP_DATA"}, \
+	{34, "SPE_FP_ROUND"}, \
+	{35, "PERFORMANCE_MONITOR"}, \
+	{36, "DOORBELL"}, \
+	{37, "DOORBELL_CRITICAL"}, \
+	{38, "GUEST_DBELL"}, \
+	{39, "GUEST_DBELL_CRIT"}, \
+	{40, "HV_SYSCALL"}, \
+	{41, "HV_PRIV"}
+
+TRACE_EVENT(kvm_exit,
+	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+	TP_ARGS(exit_nr, vcpu),
+
+	TP_STRUCT__entry(
+		__field( unsigned int, exit_nr )
+		__field( unsigned long, pc )
+		__field( unsigned long, msr )
+		__field( unsigned long, dar )
+		__field( unsigned long, last_inst )
+	),
+
+	TP_fast_assign(
+		__entry->exit_nr = exit_nr;
+		__entry->pc = kvmppc_get_pc(vcpu);
+		__entry->dar = kvmppc_get_fault_dar(vcpu);
+		__entry->msr = vcpu->arch.shared->msr;
+		__entry->last_inst = vcpu->arch.last_inst;
+	),
+
+	TP_printk("exit=%s"
+		" | pc=0x%lx"
+		" | msr=0x%lx"
+		" | dar=0x%lx"
+		" | last_inst=0x%lx"
+		,
+		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+		__entry->pc,
+		__entry->msr,
+		__entry->dar,
+		__entry->last_inst
+		)
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+	TP_PROTO(unsigned long hva),
+	TP_ARGS(hva),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, hva )
+	),
+
+	TP_fast_assign(
+		__entry->hva = hva;
+	),
+
+	TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field( __u32, mas0 )
+		__field( __u32, mas8 )
+		__field( __u32, mas1 )
+		__field( __u64, mas2 )
+		__field( __u64, mas7_3 )
+	),
+
+	TP_fast_assign(
+		__entry->mas0 = mas0;
+		__entry->mas8 = mas8;
+		__entry->mas1 = mas1;
+		__entry->mas2 = mas2;
+		__entry->mas7_3 = mas7_3;
+	),
+
+	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+		  __entry->mas0, __entry->mas8, __entry->mas1,
+		  __entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field( __u32, mas0 )
+		__field( __u32, mas1 )
+		__field( __u64, mas2 )
+		__field( __u64, mas7_3 )
+	),
+
+	TP_fast_assign(
+		__entry->mas0 = mas0;
+		__entry->mas1 = mas1;
+		__entry->mas2 = mas2;
+		__entry->mas7_3 = mas7_3;
+	),
+
+	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+		  __entry->mas0, __entry->mas1,
+		  __entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_ref_release,
+	TP_PROTO(__u64 pfn, __u32 flags),
+	TP_ARGS(pfn, flags),
+
+	TP_STRUCT__entry(
+		__field( __u64, pfn )
+		__field( __u32, flags )
+	),
+
+	TP_fast_assign(
+		__entry->pfn = pfn;
+		__entry->flags = flags;
+	),
+
+	TP_printk("pfn=%llx flags=%x",
+		  __entry->pfn, __entry->flags)
+);
+
+TRACE_EVENT(kvm_booke_queue_irqprio,
+	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
+	TP_ARGS(vcpu, priority),
+
+	TP_STRUCT__entry(
+		__field( __u32, cpu_nr )
+		__field( __u32, priority )
+		__field( unsigned long, pending )
+	),
+
+	TP_fast_assign(
+		__entry->cpu_nr = vcpu->vcpu_id;
+		__entry->priority = priority;
+		__entry->pending = vcpu->arch.pending_exceptions;
+	),
+
+	TP_printk("vcpu=%x prio=%x pending=%lx",
+		  __entry->cpu_nr, __entry->priority, __entry->pending)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
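For these events to exist at runtime, exactly one translation unit per trace system must expand the header with CREATE_TRACE_POINTS defined; all other users include it normally. Presumably booke.c does this for kvm_booke (the include site is not part of the hunks shown here):

/* In one .c file of the BookE code: */
#define CREATE_TRACE_POINTS
#include "trace_booke.h"

/* Anywhere afterwards, an event fires as an ordinary call, e.g.:
 *	trace_kvm_booke_queue_irqprio(vcpu, priority);
 */

Splitting by TRACE_SYSTEM also means the events land under separate tracefs directories (events/kvm_booke/ here, events/kvm_pr/ for the next file), so they can be enabled per flavour.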
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
new file mode 100644
index 000000000000..8b22e4748344
--- /dev/null
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -0,0 +1,297 @@
+
+#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_PR_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_pr
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
+#define kvm_trace_symbol_exit \
+	{0x100, "SYSTEM_RESET"}, \
+	{0x200, "MACHINE_CHECK"}, \
+	{0x300, "DATA_STORAGE"}, \
+	{0x380, "DATA_SEGMENT"}, \
+	{0x400, "INST_STORAGE"}, \
+	{0x480, "INST_SEGMENT"}, \
+	{0x500, "EXTERNAL"}, \
+	{0x501, "EXTERNAL_LEVEL"}, \
+	{0x502, "EXTERNAL_HV"}, \
+	{0x600, "ALIGNMENT"}, \
+	{0x700, "PROGRAM"}, \
+	{0x800, "FP_UNAVAIL"}, \
+	{0x900, "DECREMENTER"}, \
+	{0x980, "HV_DECREMENTER"}, \
+	{0xc00, "SYSCALL"}, \
+	{0xd00, "TRACE"}, \
+	{0xe00, "H_DATA_STORAGE"}, \
+	{0xe20, "H_INST_STORAGE"}, \
+	{0xe40, "H_EMUL_ASSIST"}, \
+	{0xf00, "PERFMON"}, \
+	{0xf20, "ALTIVEC"}, \
+	{0xf40, "VSX"}
+
+TRACE_EVENT(kvm_book3s_reenter,
+	TP_PROTO(int r, struct kvm_vcpu *vcpu),
+	TP_ARGS(r, vcpu),
+
+	TP_STRUCT__entry(
+		__field( unsigned int, r )
+		__field( unsigned long, pc )
+	),
+
+	TP_fast_assign(
+		__entry->r = r;
+		__entry->pc = kvmppc_get_pc(vcpu);
+	),
+
+	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
+);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+TRACE_EVENT(kvm_book3s_64_mmu_map,
+	TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
+		 struct kvmppc_pte *orig_pte),
+	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
+
+	TP_STRUCT__entry(
+		__field( unsigned char, flag_w )
+		__field( unsigned char, flag_x )
+		__field( unsigned long, eaddr )
+		__field( unsigned long, hpteg )
+		__field( unsigned long, va )
+		__field( unsigned long long, vpage )
+		__field( unsigned long, hpaddr )
+	),
+
+	TP_fast_assign(
+		__entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
+		__entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
+		__entry->eaddr = orig_pte->eaddr;
+		__entry->hpteg = hpteg;
+		__entry->va = va;
+		__entry->vpage = orig_pte->vpage;
+		__entry->hpaddr = hpaddr;
+	),
+
+	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
+		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
+		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
+);
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+TRACE_EVENT(kvm_book3s_mmu_map,
+	TP_PROTO(struct hpte_cache *pte),
+	TP_ARGS(pte),
+
+	TP_STRUCT__entry(
+		__field( u64, host_vpn )
+		__field( u64, pfn )
+		__field( ulong, eaddr )
+		__field( u64, vpage )
+		__field( ulong, raddr )
+		__field( int, flags )
+	),
+
+	TP_fast_assign(
+		__entry->host_vpn = pte->host_vpn;
+		__entry->pfn = pte->pfn;
+		__entry->eaddr = pte->pte.eaddr;
+		__entry->vpage = pte->pte.vpage;
+		__entry->raddr = pte->pte.raddr;
+		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
+			(pte->pte.may_write ? 0x2 : 0) |
+			(pte->pte.may_execute ? 0x1 : 0);
+	),
+
+	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
+		  __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_invalidate,
+	TP_PROTO(struct hpte_cache *pte),
+	TP_ARGS(pte),
+
+	TP_STRUCT__entry(
+		__field( u64, host_vpn )
+		__field( u64, pfn )
+		__field( ulong, eaddr )
+		__field( u64, vpage )
+		__field( ulong, raddr )
+		__field( int, flags )
+	),
+
+	TP_fast_assign(
+		__entry->host_vpn = pte->host_vpn;
+		__entry->pfn = pte->pfn;
+		__entry->eaddr = pte->pte.eaddr;
+		__entry->vpage = pte->pte.vpage;
+		__entry->raddr = pte->pte.raddr;
+		__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
+			(pte->pte.may_write ? 0x2 : 0) |
+			(pte->pte.may_execute ? 0x1 : 0);
+	),
+
+	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
+		  __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_flush,
+	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
+		 unsigned long long p2),
+	TP_ARGS(type, vcpu, p1, p2),
+
+	TP_STRUCT__entry(
+		__field( int, count )
+		__field( unsigned long long, p1 )
+		__field( unsigned long long, p2 )
+		__field( const char *, type )
+	),
+
+	TP_fast_assign(
+		__entry->count = to_book3s(vcpu)->hpte_cache_count;
+		__entry->p1 = p1;
+		__entry->p2 = p2;
+		__entry->type = type;
+	),
+
+	TP_printk("Flush %d %sPTEs: %llx - %llx",
+		  __entry->count, __entry->type, __entry->p1, __entry->p2)
+);
+
+TRACE_EVENT(kvm_book3s_slb_found,
+	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
+	TP_ARGS(gvsid, hvsid),
+
+	TP_STRUCT__entry(
+		__field( unsigned long long, gvsid )
+		__field( unsigned long long, hvsid )
+	),
+
+	TP_fast_assign(
+		__entry->gvsid = gvsid;
+		__entry->hvsid = hvsid;
+	),
+
+	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_fail,
+	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
+	TP_ARGS(sid_map_mask, gvsid),
+
+	TP_STRUCT__entry(
+		__field( unsigned short, sid_map_mask )
+		__field( unsigned long long, gvsid )
+	),
+
+	TP_fast_assign(
+		__entry->sid_map_mask = sid_map_mask;
+		__entry->gvsid = gvsid;
+	),
+
+	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
+		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_map,
+	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
+		 unsigned long long hvsid),
+	TP_ARGS(sid_map_mask, gvsid, hvsid),
+
+	TP_STRUCT__entry(
+		__field( unsigned short, sid_map_mask )
+		__field( unsigned long long, guest_vsid )
+		__field( unsigned long long, host_vsid )
+	),
+
+	TP_fast_assign(
+		__entry->sid_map_mask = sid_map_mask;
+		__entry->guest_vsid = gvsid;
+		__entry->host_vsid = hvsid;
+	),
+
+	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
+		  __entry->guest_vsid, __entry->host_vsid)
+);
+
+TRACE_EVENT(kvm_book3s_slbmte,
+	TP_PROTO(u64 slb_vsid, u64 slb_esid),
+	TP_ARGS(slb_vsid, slb_esid),
+
+	TP_STRUCT__entry(
+		__field( u64, slb_vsid )
+		__field( u64, slb_esid )
+	),
+
+	TP_fast_assign(
+		__entry->slb_vsid = slb_vsid;
+		__entry->slb_esid = slb_esid;
+	),
+
+	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
+);
+
+TRACE_EVENT(kvm_exit,
+	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+	TP_ARGS(exit_nr, vcpu),
+
+	TP_STRUCT__entry(
+		__field( unsigned int, exit_nr )
+		__field( unsigned long, pc )
+		__field( unsigned long, msr )
+		__field( unsigned long, dar )
+		__field( unsigned long, srr1 )
+		__field( unsigned long, last_inst )
+	),
+
+	TP_fast_assign(
+		__entry->exit_nr = exit_nr;
+		__entry->pc = kvmppc_get_pc(vcpu);
+		__entry->dar = kvmppc_get_fault_dar(vcpu);
+		__entry->msr = vcpu->arch.shared->msr;
+		__entry->srr1 = vcpu->arch.shadow_srr1;
+		__entry->last_inst = vcpu->arch.last_inst;
+	),
+
+	TP_printk("exit=%s"
+		" | pc=0x%lx"
+		" | msr=0x%lx"
+		" | dar=0x%lx"
+		" | srr1=0x%lx"
+		" | last_inst=0x%lx"
+		,
+		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+		__entry->pc,
+		__entry->msr,
+		__entry->dar,
+		__entry->srr1,
+		__entry->last_inst
+		)
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+	TP_PROTO(unsigned long hva),
+	TP_ARGS(hva),
+
+	TP_STRUCT__entry(
+		__field( unsigned long, hva )
+	),
+
+	TP_fast_assign(
+		__entry->hva = hva;
+	),
+
+	TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
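Note one substantive change carried along with the move: the PR copy of kvm_exit reads srr1 straight from vcpu->arch.shadow_srr1, whereas the version removed from trace.h had to take and drop a shadow-vcpu reference. Comparing the two TP_fast_assign blocks in this diff:

/* trace.h (removed):
 *	svcpu = svcpu_get(vcpu);
 *	__entry->srr1 = svcpu->shadow_srr1;
 *	svcpu_put(svcpu);
 *
 * trace_pr.h (new):
 *	__entry->srr1 = vcpu->arch.shadow_srr1;
 */

This presumably relies on book3s_pr.c (reworked earlier in this series) caching shadow_srr1 in the vcpu, so the tracepoint no longer needs the get/put pair.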
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1e4e7b97337a..bedda67cc222 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1089,12 +1089,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6d16fc530246..07c127fc2064 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7091,7 +7091,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 	int i;
@@ -7112,7 +7112,8 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 	}
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	int i;
 
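The s390 and x86 hunks are the mechanical tail of the powerpc change: kvm_arch_free_memslot() and kvm_arch_create_memslot() gain a struct kvm * so architecture code can reach per-VM state (on powerpc, the new kvm->arch.kvm_ops). Every architecture implementing the hooks must adopt the new signatures, which after this series read:

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);

Architectures that do not need the argument (s390 here) simply ignore it.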