Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/disassemble.h     |  34
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h         |  18
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h      |   3
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h   | 146
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  |   2
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h       |   5
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h        |   9
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h         |  80
-rw-r--r--  arch/powerpc/include/asm/reg.h             |  12
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h       |   1
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h        |   2
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm_para.h   |   6
12 files changed, 273 insertions, 45 deletions
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 856f8deb557a..6330a61b875a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -81,4 +81,38 @@ static inline unsigned int get_oc(u32 inst)
 {
 	return (inst >> 11) & 0x7fff;
 }
+
+#define IS_XFORM(inst)	(get_op(inst) == 31)
+#define IS_DSFORM(inst)	(get_op(inst) >= 56)
+
+/*
+ * Create a DSISR value from the instruction
+ */
+static inline unsigned make_dsisr(unsigned instr)
+{
+	unsigned dsisr;
+
+
+	/* bits 6:15 --> 22:31 */
+	dsisr = (instr & 0x03ff0000) >> 16;
+
+	if (IS_XFORM(instr)) {
+		/* bits 29:30 --> 15:16 */
+		dsisr |= (instr & 0x00000006) << 14;
+		/* bit 25 --> 17 */
+		dsisr |= (instr & 0x00000040) << 8;
+		/* bits 21:24 --> 18:21 */
+		dsisr |= (instr & 0x00000780) << 3;
+	} else {
+		/* bit 5 --> 17 */
+		dsisr |= (instr & 0x04000000) >> 12;
+		/* bits 1: 4 --> 18:21 */
+		dsisr |= (instr & 0x78000000) >> 17;
+		/* bits 30:31 --> 12:13 */
+		if (IS_DSFORM(instr))
+			dsisr |= (instr & 0x00000003) << 18;
+	}
+
+	return dsisr;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
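
make_dsisr() packs fields of a faulting load/store instruction into the layout the
DSISR register uses for alignment interrupts. A standalone sketch of the helper in
action, with get_op() re-derived here for illustration (in the kernel it is defined
earlier in this header); the harness itself is an assumption, not kernel code:

    #include <stdio.h>

    #define get_op(inst)	((unsigned)(inst) >> 26)
    #define IS_XFORM(inst)	(get_op(inst) == 31)
    #define IS_DSFORM(inst)	(get_op(inst) >= 56)

    static unsigned make_dsisr(unsigned instr)
    {
    	unsigned dsisr;

    	/* bits 6:15 --> 22:31 */
    	dsisr = (instr & 0x03ff0000) >> 16;

    	if (IS_XFORM(instr)) {
    		/* bits 29:30 --> 15:16, bit 25 --> 17, bits 21:24 --> 18:21 */
    		dsisr |= (instr & 0x00000006) << 14;
    		dsisr |= (instr & 0x00000040) << 8;
    		dsisr |= (instr & 0x00000780) << 3;
    	} else {
    		/* bit 5 --> 17, bits 1:4 --> 18:21 */
    		dsisr |= (instr & 0x04000000) >> 12;
    		dsisr |= (instr & 0x78000000) >> 17;
    		if (IS_DSFORM(instr))
    			/* bits 30:31 --> 12:13 */
    			dsisr |= (instr & 0x00000003) << 18;
    	}
    	return dsisr;
    }

    int main(void)
    {
    	/* 0x7ca4182e encodes "lwzx r5,r4,r3": opcode 31, an X-form load */
    	printf("dsisr = %#x\n", make_dsisr(0x7ca4182e));
    	return 0;
    }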
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 19eb74a95b59..9601741080e5 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -102,6 +102,7 @@
 #define BOOK3S_INTERRUPT_PERFMON	0xf00
 #define BOOK3S_INTERRUPT_ALTIVEC	0xf20
 #define BOOK3S_INTERRUPT_VSX		0xf40
+#define BOOK3S_INTERRUPT_FAC_UNAVAIL	0xf60
 #define BOOK3S_INTERRUPT_H_FAC_UNAVAIL	0xf80
 
 #define BOOK3S_IRQPRIO_SYSTEM_RESET		0
@@ -114,14 +115,15 @@
 #define BOOK3S_IRQPRIO_FP_UNAVAIL		7
 #define BOOK3S_IRQPRIO_ALTIVEC			8
 #define BOOK3S_IRQPRIO_VSX			9
-#define BOOK3S_IRQPRIO_SYSCALL			10
-#define BOOK3S_IRQPRIO_MACHINE_CHECK		11
-#define BOOK3S_IRQPRIO_DEBUG			12
-#define BOOK3S_IRQPRIO_EXTERNAL			13
-#define BOOK3S_IRQPRIO_DECREMENTER		14
-#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	15
-#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		16
-#define BOOK3S_IRQPRIO_MAX			17
+#define BOOK3S_IRQPRIO_FAC_UNAVAIL		10
+#define BOOK3S_IRQPRIO_SYSCALL			11
+#define BOOK3S_IRQPRIO_MACHINE_CHECK		12
+#define BOOK3S_IRQPRIO_DEBUG			13
+#define BOOK3S_IRQPRIO_EXTERNAL			14
+#define BOOK3S_IRQPRIO_DECREMENTER		15
+#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	16
+#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		17
+#define BOOK3S_IRQPRIO_MAX			18
 
 #define BOOK3S_HFLAG_DCBZ32		0x1
 #define BOOK3S_HFLAG_SLB		0x2
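
The BOOK3S_IRQPRIO_* values index the vcpu's pending-exception bitmap, and lower
numbers are delivered first, which is why inserting FAC_UNAVAIL at 10 shifts every
lower-priority entry down by one. A sketch of the dispatch idiom, loosely modeled on
kvmppc_core_prepare_to_enter() in arch/powerpc/kvm/book3s.c (the loop body here is
illustrative, not the exact kernel code):

    unsigned long *pending = &vcpu->arch.pending_exceptions;
    unsigned int priority;

    if (!*pending)
    	return;				/* __ffs() is undefined on 0 */
    priority = __ffs(*pending);		/* lowest bit = highest priority */
    while (priority < BOOK3S_IRQPRIO_MAX) {
    	if (kvmppc_book3s_irqprio_deliver(vcpu, priority))
    		clear_bit(priority, pending);
    	priority = find_next_bit(pending,
    				 BITS_PER_BYTE * sizeof(*pending),
    				 priority + 1);
    }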
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bb1e38a23ac7..f52f65694527 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -268,9 +268,10 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 	return vcpu->arch.pc;
 }
 
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 {
-	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
 }
 
 static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
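
kvmppc_need_byteswap() is true when the guest's MSR_LE setting disagrees with the
endianness the kernel was built for, so instruction words fetched from the guest must
be swapped before decoding. A minimal sketch of a caller (kvmppc_fixup_inst is a
hypothetical name; swab32() is the kernel's 32-bit byte swap from <linux/swab.h>):

    static inline u32 kvmppc_fixup_inst(struct kvm_vcpu *vcpu, u32 inst)
    {
    	/* swap the raw instruction word if guest and host disagree */
    	if (kvmppc_need_byteswap(vcpu))
    		inst = swab32(inst);
    	return inst;
    }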
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 51388befeddb..fddb72b48ce9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -77,34 +77,122 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
 	return old == 0;
 }
 
+static inline int __hpte_actual_psize(unsigned int lp, int psize)
+{
+	int i, shift;
+	unsigned int mask;
+
+	/* start from 1 ignoring MMU_PAGE_4K */
+	for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+		/* invalid penc */
+		if (mmu_psize_defs[psize].penc[i] == -1)
+			continue;
+		/*
+		 * encoding bits per actual page size
+		 *        PTE LP     actual page size
+		 *    rrrr rrrz	>=8KB
+		 *    rrrr rrzz	>=16KB
+		 *    rrrr rzzz	>=32KB
+		 *    rrrr zzzz	>=64KB
+		 * .......
+		 */
+		shift = mmu_psize_defs[i].shift - LP_SHIFT;
+		if (shift > LP_BITS)
+			shift = LP_BITS;
+		mask = (1 << shift) - 1;
+		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+			return i;
+	}
+	return -1;
+}
+
 static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 					     unsigned long pte_index)
 {
-	unsigned long rb, va_low;
+	int b_psize, a_psize;
+	unsigned int penc;
+	unsigned long rb = 0, va_low, sllp;
+	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+	if (!(v & HPTE_V_LARGE)) {
+		/* both base and actual psize is 4k */
+		b_psize = MMU_PAGE_4K;
+		a_psize = MMU_PAGE_4K;
+	} else {
+		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
+
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[b_psize].shift)
+				continue;
 
+			a_psize = __hpte_actual_psize(lp, b_psize);
+			if (a_psize != -1)
+				break;
+		}
+	}
+	/*
+	 * Ignore the top 14 bits of va
+	 * v have top two bits covering segment size, hence move
+	 * by 16 bits, Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
+	 * AVA field in v also have the lower 23 bits ignored.
+	 * For base page size 4K we need 14 .. 65 bits (so need to
+	 * collect extra 11 bits)
+	 * For others we need 14..14+i
+	 */
+	/* This covers 14..54 bits of va*/
 	rb = (v & ~0x7fUL) << 16;		/* AVA field */
+	/*
+	 * AVA in v had cleared lower 23 bits. We need to derive
+	 * that from pteg index
+	 */
 	va_low = pte_index >> 3;
 	if (v & HPTE_V_SECONDARY)
 		va_low = ~va_low;
-	/* xor vsid from AVA */
+	/*
+	 * get the vpn bits from va_low using reverse of hashing.
+	 * In v we have va with 23 bits dropped and then left shifted
+	 * HPTE_V_AVPN_SHIFT (7) bits. Now to find vsid we need
+	 * right shift it with (SID_SHIFT - (23 - 7))
+	 */
 	if (!(v & HPTE_V_1TB_SEG))
-		va_low ^= v >> 12;
+		va_low ^= v >> (SID_SHIFT - 16);
 	else
-		va_low ^= v >> 24;
+		va_low ^= v >> (SID_SHIFT_1T - 16);
 	va_low &= 0x7ff;
-	if (v & HPTE_V_LARGE) {
-		rb |= 1;			/* L field */
-		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
-		    (r & 0xff000)) {
-			/* non-16MB large page, must be 64k */
-			/* (masks depend on page size) */
-			rb |= 0x1000;		/* page encoding in LP field */
-			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-			rb |= ((va_low << 4) & 0xf0);	/* AVAL field (P7 doesn't seem to care) */
-		}
-	} else {
-		/* 4kB page */
-		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
+
+	switch (b_psize) {
+	case MMU_PAGE_4K:
+		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
+			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
+		rb |= sllp << 5;	/* AP field */
+		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
+		break;
+	default:
+	{
+		int aval_shift;
+		/*
+		 * remaining 7bits of AVA/LP fields
+		 * Also contain the rr bits of LP
+		 */
+		rb |= (va_low & 0x7f) << 16;
+		/*
+		 * Now clear not needed LP bits based on actual psize
+		 */
+		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
+		/*
+		 * AVAL field 58..77 - base_page_shift bits of va
+		 * we have space for 58..64 bits, Missing bits should
+		 * be zero filled. +1 is to take care of L bit shift
+		 */
+		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
+		rb |= ((va_low << aval_shift) & 0xfe);
+
+		rb |= 1;		/* L field */
+		penc = mmu_psize_defs[b_psize].penc[a_psize];
+		rb |= penc << 12;	/* LP field */
+		break;
+	}
 	}
 	rb |= (v >> 54) & 0x300;		/* B field */
 	return rb;
@@ -112,14 +200,26 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 
 static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 {
+	int size, a_psize;
+	/* Look at the 8 bit LP value */
+	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
 	/* only handle 4k, 64k and 16M pages for now */
 	if (!(h & HPTE_V_LARGE))
-		return 1ul << 12;		/* 4k page */
-	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
-		return 1ul << 16;	/* 64k page */
-	if ((l & 0xff000) == 0)
-		return 1ul << 24;	/* 16M page */
-	return 0;			/* error */
+		return 1ul << 12;
+	else {
+		for (size = 0; size < MMU_PAGE_COUNT; size++) {
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[size].shift)
+				continue;
+
+			a_psize = __hpte_actual_psize(lp, size);
+			if (a_psize != -1)
+				return 1ul << mmu_psize_defs[a_psize].shift;
+		}
+
+	}
+	return 0;
 }
 
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
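
Both helpers pivot on mmu_psize_defs[base].penc[actual], the LP encoding that
identifies the actual page size stored in a segment of a given base page size. A
user-space sketch of that lookup with a toy three-entry table (the table contents
are illustrative assumptions; 64K -> LP 1 and 16M -> LP 0 follow the common POWER7
encodings, and the function body mirrors __hpte_actual_psize above):

    #include <stdio.h>

    #define LP_SHIFT	12
    #define LP_BITS	8

    enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

    /* toy stand-in for the kernel's mmu_psize_defs[] */
    static struct {
    	int shift;			/* log2(page size); 0 = unsupported */
    	int penc[MMU_PAGE_COUNT];	/* LP encoding per actual page size */
    } mmu_psize_defs[MMU_PAGE_COUNT] = {
    	[MMU_PAGE_4K]  = { 12, { -1, -1, -1 } },
    	[MMU_PAGE_64K] = { 16, { -1,  1, -1 } },	/* 64K-in-64K: LP = 1 */
    	[MMU_PAGE_16M] = { 24, { -1, -1,  0 } },	/* 16M-in-16M: LP = 0 */
    };

    /* same logic as __hpte_actual_psize above */
    static int hpte_actual_psize(unsigned int lp, int psize)
    {
    	int i, shift;
    	unsigned int mask;

    	for (i = 1; i < MMU_PAGE_COUNT; i++) {
    		if (mmu_psize_defs[psize].penc[i] == -1)
    			continue;
    		shift = mmu_psize_defs[i].shift - LP_SHIFT;
    		if (shift > LP_BITS)
    			shift = LP_BITS;
    		mask = (1u << shift) - 1;
    		if ((lp & mask) == (unsigned int)mmu_psize_defs[psize].penc[i])
    			return i;
    	}
    	return -1;
    }

    int main(void)
    {
    	unsigned long r = 0x1000;	/* second HPTE dword with LP = 1 */
    	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

    	/* a 64K base page with LP = 1 decodes to index 1 (64K actual) */
    	printf("actual psize index = %d\n", hpte_actual_psize(lp, MMU_PAGE_64K));
    	return 0;
    }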
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 821725c1bf46..5bdfb5dd3400 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -104,6 +104,7 @@ struct kvmppc_host_state {
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
 	u64 ppr;
+	u64 host_fscr;
 #endif
 };
 
@@ -133,6 +134,7 @@ struct kvmppc_book3s_shadow_vcpu {
 		u64 esid;
 		u64 vsid;
 	} slb[64];			/* guest SLB */
+	u64 shadow_fscr;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 80d46b5a7efb..c7aed6105ff9 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -108,9 +108,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dear;
 }
-
-static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.shared->msr;
-}
 #endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1eaea2dea174..bb66d8b8efdf 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -449,7 +449,9 @@ struct kvm_vcpu_arch {
 	ulong pc;
 	ulong ctr;
 	ulong lr;
+#ifdef CONFIG_PPC_BOOK3S
 	ulong tar;
+#endif
 
 	ulong xer;
 	u32 cr;
@@ -475,6 +477,7 @@ struct kvm_vcpu_arch {
 	ulong ppr;
 	ulong pspb;
 	ulong fscr;
+	ulong shadow_fscr;
 	ulong ebbhr;
 	ulong ebbrr;
 	ulong bescr;
@@ -562,6 +565,7 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_PPC_BOOK3S
 	ulong fault_dar;
 	u32 fault_dsisr;
+	unsigned long intr_msr;
 #endif
 
 #ifdef CONFIG_BOOKE
@@ -622,8 +626,12 @@ struct kvm_vcpu_arch {
 	wait_queue_head_t cpu_run;
 
 	struct kvm_vcpu_arch_shared *shared;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	bool shared_big_endian;
+#endif
 	unsigned long magic_page_pa; /* phys addr to map the magic page to */
 	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+	bool disable_kernel_nx;
 
 	int irq_type;		/* one of KVM_IRQ_* */
 	int irq_cpu_id;
@@ -654,7 +662,6 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
-	unsigned long intr_msr;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 4096f16502a9..4a7cc453be0b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -449,6 +449,84 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 }
 
 /*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness. So expose helpers to all of them.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	/* Only Book3S_64 PR supports bi-endian for now */
+	return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+	/* Book3s_64 HV on little endian is always little endian */
+	return false;
+#else
+	return true;
+#endif
+}
+
+#define SHARED_WRAPPER_GET(reg, size)					\
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
+	else								\
+		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
+}									\
+
+#define SHARED_WRAPPER_SET(reg, size)					\
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
+	else								\
+		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
+}									\
+
+#define SHARED_WRAPPER(reg, size)					\
+	SHARED_WRAPPER_GET(reg, size)					\
+	SHARED_WRAPPER_SET(reg, size)					\
+
+SHARED_WRAPPER(critical, 64)
+SHARED_WRAPPER(sprg0, 64)
+SHARED_WRAPPER(sprg1, 64)
+SHARED_WRAPPER(sprg2, 64)
+SHARED_WRAPPER(sprg3, 64)
+SHARED_WRAPPER(srr0, 64)
+SHARED_WRAPPER(srr1, 64)
+SHARED_WRAPPER(dar, 64)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		vcpu->arch.shared->msr = cpu_to_be64(val);
+	else
+		vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+	else
+		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+	else
+		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
+/*
  * Please call after prepare_to_enter. This function puts the lazy ee and irq
  * disabled tracking state back to normal mode, without actually enabling
  * interrupts.
@@ -485,7 +563,7 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
 	msr_64bit = MSR_SF;
 #endif
 
-	if (!(vcpu->arch.shared->msr & msr_64bit))
+	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
 		ea = (uint32_t)ea;
 
 	return ea;
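
Callers now go through these generated accessors instead of dereferencing
vcpu->arch.shared directly, so guest byte order is handled in exactly one place. A
minimal usage sketch (record_storage_fault is a hypothetical helper; kvmppc_set_dar
and kvmppc_set_dsisr are generated by the SHARED_WRAPPER lines above):

    /* hypothetical helper: store fault state into the shared page */
    static void record_storage_fault(struct kvm_vcpu *vcpu,
    				 ulong dar, u32 dsisr)
    {
    	kvmppc_set_dar(vcpu, dar);	/* from SHARED_WRAPPER(dar, 64) */
    	kvmppc_set_dsisr(vcpu, dsisr);	/* from SHARED_WRAPPER(dsisr, 32) */
    }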
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e5d2e0bc7e03..4852bcf270f3 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -670,18 +670,20 @@
 #define MMCR0_PROBLEM_DISABLE MMCR0_FCP
 #define MMCR0_FCM1	0x10000000UL /* freeze counters while MSR mark = 1 */
 #define MMCR0_FCM0	0x08000000UL /* freeze counters while MSR mark = 0 */
-#define MMCR0_PMXE	0x04000000UL /* performance monitor exception enable */
-#define MMCR0_FCECE	0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_PMXE	ASM_CONST(0x04000000) /* perf mon exception enable */
+#define MMCR0_FCECE	ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */
 #define MMCR0_TBEE	0x00400000UL /* time base exception enable */
 #define MMCR0_BHRBA	0x00200000UL /* BHRB Access allowed in userspace */
 #define MMCR0_EBE	0x00100000UL /* Event based branch enable */
 #define MMCR0_PMCC	0x000c0000UL /* PMC control */
 #define MMCR0_PMCC_U6	0x00080000UL /* PMC1-6 are R/W by user (PR) */
 #define MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable*/
-#define MMCR0_PMCjCE	0x00004000UL /* PMCj count enable*/
+#define MMCR0_PMCjCE	ASM_CONST(0x00004000) /* PMCj count enable*/
 #define MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
-#define MMCR0_PMAO_SYNC	0x00000800UL /* PMU interrupt is synchronous */
-#define MMCR0_PMAO	0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_PMAO_SYNC	ASM_CONST(0x00000800) /* PMU intr is synchronous */
+#define MMCR0_C56RUN	ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */
+/* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_PMAO	ASM_CONST(0x00000080)
 #define MMCR0_SHRFC	0x00000040UL /* SHRre freeze conditions between threads */
 #define MMCR0_FC56	0x00000010UL /* freeze counters 5 and 6 */
 #define MMCR0_FCTI	0x00000008UL /* freeze counters in tags inactive mode */
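
The switch from the 0x...UL spelling to ASM_CONST() lets these masks be used from
assembly sources too, where a UL suffix would not parse. ASM_CONST is defined
elsewhere in the powerpc headers (ppc_asm.h at this point in history, asm-const.h
in later kernels) essentially as:

    #ifdef __ASSEMBLY__
    #define ASM_CONST(x)	x
    #else
    #define __ASM_CONST(x)	x##UL
    #define ASM_CONST(x)	__ASM_CONST(x)
    #endif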
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 163c3b05a76e..464f1089b532 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -583,6 +583,7 @@
 
 /* Bit definitions for L1CSR0. */
 #define L1CSR0_CPE	0x00010000	/* Data Cache Parity Enable */
+#define L1CSR0_CUL	0x00000400	/* Data Cache Unable to Lock */
 #define L1CSR0_CLFC	0x00000100	/* Cache Lock Bits Flash Clear */
 #define L1CSR0_DCFI	0x00000002	/* Data Cache Flash Invalidate */
 #define L1CSR0_CFI	0x00000002	/* Cache Flash Invalidate */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index a6665be4f3ab..2bc4a9409a93 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -545,7 +545,6 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
 #define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
 #define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
-#define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
 
 #define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
 #define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -555,6 +554,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
 
 #define KVM_REG_PPC_DABRX	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+#define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index e3af3286a068..91e42f09b323 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -82,10 +82,16 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_FEATURE_MAGIC_PAGE	1
 
+/* Magic page flags from host to guest */
+
 #define KVM_MAGIC_FEAT_SR		(1 << 0)
 
 /* MASn, ESR, PIR, and high SPRGs */
 #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7	(1 << 1)
 
+/* Magic page flags from guest to host */
+
+#define MAGIC_PAGE_FLAG_NOT_MAPPED_NX	(1 << 0)
+
 
 #endif /* _UAPI__POWERPC_KVM_PARA_H__ */