author     Paul Mackerras <paulus@samba.org>  2011-12-12 07:36:37 -0500
committer  Avi Kivity <avi@redhat.com>        2012-03-05 07:52:37 -0500
commit     697d3899dcb4bcd918d060a92db57b794e56b077 (patch)
tree       173cdd849eca204fec8b64ea520b619372c3d970 /arch/powerpc/include/asm
parent     06ce2c63d933e347f8a199f123a8a293619ab3d2 (diff)
KVM: PPC: Implement MMIO emulation support for Book3S HV guests
This provides the low-level support for MMIO emulation in Book3S HV guests. When the guest tries to map a page which is not covered by any memslot, that page is taken to be an MMIO emulation page. Instead of inserting a valid HPTE, we insert an HPTE that has the valid bit clear but another hypervisor software-use bit set, which we call HPTE_V_ABSENT, to indicate that this is an absent page. An absent page is treated much like a valid page as far as guest hcalls (H_ENTER, H_REMOVE, H_READ etc.) are concerned, except of course that an absent HPTE doesn't need to be invalidated with tlbie since it was never valid as far as the hardware is concerned.

When the guest accesses a page for which there is an absent HPTE, it will take a hypervisor data storage interrupt (HDSI) since we now set the VPM1 bit in the LPCR. Our HDSI handler for HPTE-not-present faults looks up the hash table and if it finds an absent HPTE mapping the requested virtual address, will switch to kernel mode and handle the fault in kvmppc_book3s_hv_page_fault(), which at present just calls kvmppc_hv_emulate_mmio() to set up the MMIO emulation.

This is based on an earlier patch by Benjamin Herrenschmidt, but since heavily reworked.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
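To make the HPTE_V_ABSENT idea concrete, here is a minimal sketch (not code from this patch) of how an H_ENTER-style path could record an MMIO-emulation page; hpte_v_for_insert and is_mmio are hypothetical names:

	/* Sketch only: choosing HPTE dword 0 bits for an MMIO-emulation
	 * page.  'is_mmio' is a hypothetical flag meaning "no memslot
	 * covers this guest page". */
	static unsigned long hpte_v_for_insert(unsigned long pteh, int is_mmio)
	{
		if (is_mmio) {
			/* Never valid to the hardware, so no tlbie is needed
			 * when it is removed; the HDSI handler recognizes the
			 * entry by HPTE_V_ABSENT instead. */
			pteh &= ~HPTE_V_VALID;
			pteh |= HPTE_V_ABSENT;
		}
		return pteh;
	}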
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h     |  5
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  | 26
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h       |  5
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h     |  2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h     |  4
-rw-r--r--  arch/powerpc/include/asm/reg.h            |  1
6 files changed, 41 insertions, 2 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index c700f43ba178..3a9e51f43397 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -119,6 +119,11 @@ extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
+			struct kvm_vcpu *vcpu, unsigned long addr,
+			unsigned long status);
+extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
+			unsigned long slb_v, unsigned long valid);
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
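As a hedged sketch of how the two new entry points might chain together on the HDSI path the commit message describes (handle_hdsi_sketch and the argument choices are assumptions, not code from this patch):

	/* Sketch: HPTE-not-present HDSI handling.  Search the hash table
	 * for a valid *or* absent HPTE mapping the faulting address; an
	 * absent one is handled in kernel mode, where MMIO emulation is
	 * set up. */
	static int handle_hdsi_sketch(struct kvm_run *run, struct kvm_vcpu *vcpu,
				      unsigned long eaddr, unsigned long slb_v,
				      unsigned long dsisr)
	{
		long index = kvmppc_hv_find_lock_hpte(vcpu->kvm, eaddr, slb_v,
						      HPTE_V_VALID | HPTE_V_ABSENT);

		if (index < 0)
			return RESUME_GUEST;	/* no HPTE: reflect fault to guest */
		return kvmppc_book3s_hv_page_fault(run, vcpu, eaddr, dsisr);
	}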
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 9508c03e6671..79dc37fb86b5 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -43,12 +43,15 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 #define HPT_HASH_MASK	(HPT_NPTEG - 1)
 #endif
 
+#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
+
 /*
  * We use a lock bit in HPTE dword 0 to synchronize updates and
  * accesses to each HPTE, and another bit to indicate non-present
  * HPTEs.
  */
 #define HPTE_V_HVLOCK	0x40UL
+#define HPTE_V_ABSENT	0x20UL
 
 static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
 {
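A short sketch of the locking convention these bits imply; treating an absent entry like a valid one is from the commit message, while the helper name is hypothetical:

	/* Sketch: an hcall such as H_READ considers a slot occupied if the
	 * HPTE is either valid or absent.  The HVLOCK bit is taken first,
	 * as in the existing try_lock_hpte() users (and is left held here
	 * for the caller to drop). */
	static bool hpte_slot_occupied(unsigned long *hpte)
	{
		while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
			cpu_relax();
		return (hpte[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) != 0;
	}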
@@ -144,6 +147,29 @@ static inline unsigned long hpte_cache_bits(unsigned long pte_val)
 #endif
 }
 
+static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
+{
+	if (key)
+		return PP_RWRX <= pp && pp <= PP_RXRX;
+	return 1;
+}
+
+static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
+{
+	if (key)
+		return pp == PP_RWRW;
+	return pp <= PP_RWRW;
+}
+
+static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
+{
+	unsigned long skey;
+
+	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
+		((hpte_r & HPTE_R_KEY_LO) >> 9);
+	return (amr >> (62 - 2 * skey)) & 3;
+}
+
 static inline void lock_rmap(unsigned long *rmap)
 {
 	do {
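A hedged example of how the three new helpers might combine for a guest data access; returning DSISR bits this way is an assumption patterned on the commit message, not code from the patch:

	/* Sketch: pp is the PP field (including PP0), key the Ks/Kp bit
	 * chosen by the SLB entry, hpte_r the HPTE dword 1, amr the
	 * guest's AMR.  In the 2-bit AMR field returned by
	 * hpte_get_skey_perm(), bit 0 blocks reads and bit 1 blocks
	 * writes. */
	static unsigned long data_access_fault(int writing, unsigned long pp,
					       unsigned long key,
					       unsigned long hpte_r,
					       unsigned long amr)
	{
		int perm;

		if (writing ? !hpte_write_permission(pp, key)
			    : !hpte_read_permission(pp, key))
			return DSISR_PROTFAULT;

		perm = hpte_get_skey_perm(hpte_r, amr);
		if (writing)
			perm >>= 1;	/* select the write-prohibit bit */
		return (perm & 1) ? DSISR_KEYFAULT : 0;
	}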
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 97cb2d7865f3..937cacaaf236 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -210,6 +210,7 @@ struct kvm_arch {
 	unsigned long lpcr;
 	unsigned long rmor;
 	struct kvmppc_rma_info *rma;
+	unsigned long vrma_slb_v;
 	int rma_setup_done;
 	struct list_head spapr_tce_tables;
 	spinlock_t slot_phys_lock;
@@ -452,6 +453,10 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	struct kvm_vcpu_arch_shared shregs;
 
+	unsigned long pgfault_addr;
+	long pgfault_index;
+	unsigned long pgfault_hpte[2];
+
 	struct list_head run_list;
 	struct task_struct *run_task;
 	struct kvm_run *kvm_run;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 412ba493cb98..0759dd8bf5aa 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -108,11 +108,11 @@ extern char initial_stab[];
 #define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
 
 /* Values for PP (assumes Ks=0, Kp=1) */
-/* pp0 will always be 0 for linux */
 #define PP_RWXX	0	/* Supervisor read/write, User none */
 #define PP_RWRX 1	/* Supervisor read/write, User read */
 #define PP_RWRW 2	/* Supervisor read/write, User read/write */
 #define PP_RXRX 3	/* Supervisor read, User read */
+#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */
 
 #ifndef __ASSEMBLY__
 
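Since PP_RXXX is the first PP value to use PP0, here is a one-line sketch of how the combined field would be extracted from HPTE dword 1 (the helper name is hypothetical):

	/* Sketch: hpte_read_permission()/hpte_write_permission() expect
	 * the PP0 bit and the low two PP bits together, matching how
	 * PP_RXXX is encoded as (HPTE_R_PP0 | 2) above. */
	static inline unsigned long hpte_pp_bits(unsigned long hpte_r)
	{
		return hpte_r & (HPTE_R_PP0 | HPTE_R_PP);
	}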
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e980faae4225..d81f99430fe7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -45,6 +45,7 @@
 #define PPC_INST_MFSPR_DSCR_MASK	0xfc1fffff
 #define PPC_INST_MTSPR_DSCR		0x7c1103a6
 #define PPC_INST_MTSPR_DSCR_MASK	0xfc1fffff
+#define PPC_INST_SLBFEE			0x7c0007a7
 
 #define PPC_INST_STRING			0x7c00042a
 #define PPC_INST_STRING_MASK		0xfc0007fe
@@ -183,7 +184,8 @@
 					__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
 #define PPC_ERATSX_DOT(t, a, w)	stringify_in_c(.long PPC_INST_ERATSX_DOT | \
 					__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
-
+#define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
+					__PPC_RT(t) | __PPC_RB(b))
 
 /*
  * Define what the VSX XX1 form instructions will look like, then add
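For reference, a hedged sketch of how PPC_SLBFEE_DOT could be used from inline assembly to probe the SLB (the wrapper and operand choices are assumptions, not code from this patch):

	/* Sketch: slbfee. searches the SLB for the entry translating the
	 * effective address in RB; RT receives the entry's VSID data, or
	 * 0 if nothing matches.  CR0 is altered, hence the clobber. */
	static unsigned long slb_probe_sketch(unsigned long eaddr)
	{
		unsigned long slb_v;

		asm volatile(PPC_SLBFEE_DOT(%0, %1)
			     : "=r" (slb_v)
			     : "r" (eaddr)
			     : "cr0");
		return slb_v;
	}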
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 64447f6c049a..16efb3151c20 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -216,6 +216,7 @@
 #define DSISR_ISSTORE		0x02000000	/* access was a store */
 #define DSISR_DABRMATCH		0x00400000	/* hit data breakpoint */
 #define DSISR_NOSEGMENT		0x00200000	/* STAB/SLB miss */
+#define DSISR_KEYFAULT		0x00200000	/* Key fault */
 #define SPRN_TBRL	0x10C	/* Time Base Read Lower Register (user, R/O) */
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */