author		Marcelo Tosatti <mtosatti@redhat.com>	2012-10-10 18:03:54 -0400
committer	Marcelo Tosatti <mtosatti@redhat.com>	2012-10-10 18:03:54 -0400
commit		03604b31142058362db13e7881385806977893f5
tree		8b3c7934fbfcbea5cce3e1fad82f6fd23aea1778
parent		87cac8f879a5ecd7109dbe688087e8810b3364eb
parent		12ecd9570d8941c15602a11725ec9b0ede48d6c2
Merge branch 'for-upstream' of http://github.com/agraf/linux-2.6 into queue
* 'for-upstream' of http://github.com/agraf/linux-2.6: (56 commits)
  arch/powerpc/kvm/e500_tlb.c: fix error return code
  KVM: PPC: Book3S HV: Provide a way for userspace to get/set per-vCPU areas
  KVM: PPC: Book3S: Get/set guest FP regs using the GET/SET_ONE_REG interface
  KVM: PPC: Book3S: Get/set guest SPRs using the GET/SET_ONE_REG interface
  KVM: PPC: set IN_GUEST_MODE before checking requests
  KVM: PPC: e500: MMU API: fix leak of shared_tlb_pages
  KVM: PPC: e500: fix allocation size error on g2h_tlb1_map
  KVM: PPC: Book3S HV: Fix calculation of guest phys address for MMIO emulation
  KVM: PPC: Book3S HV: Remove bogus update of physical thread IDs
  KVM: PPC: Book3S HV: Fix updates of vcpu->cpu
  KVM: Move some PPC ioctl definitions to the correct place
  KVM: PPC: Book3S HV: Handle memory slot deletion and modification correctly
  KVM: PPC: Move kvm->arch.slot_phys into memslot.arch
  KVM: PPC: Book3S HV: Take the SRCU read lock before looking up memslots
  KVM: PPC: bookehv: Allow duplicate calls of DO_KVM macro
  KVM: PPC: BookE: Support FPU on non-hv systems
  KVM: PPC: 440: Implement mfdcrx
  KVM: PPC: 440: Implement mtdcrx
  Document IACx/DACx registers access using ONE_REG API
  KVM: PPC: E500: Remove E500_TLB_DIRTY flag
  ...
-rw-r--r--  Documentation/virtual/kvm/api.txt            |  49
-rw-r--r--  arch/powerpc/include/asm/Kbuild              |   1
-rw-r--r--  arch/powerpc/include/asm/epapr_hcalls.h      |  36
-rw-r--r--  arch/powerpc/include/asm/fsl_hcalls.h        |  36
-rw-r--r--  arch/powerpc/include/asm/kvm.h               |  59
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h        |   2
-rw-r--r--  arch/powerpc/include/asm/kvm_booke_hv_asm.h  |   4
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h          |  38
-rw-r--r--  arch/powerpc/include/asm/kvm_para.h          |  21
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h           |  64
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h         |   7
-rw-r--r--  arch/powerpc/kernel/epapr_hcalls.S           |  28
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c         |  11
-rw-r--r--  arch/powerpc/kernel/kvm.c                    |   2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c              |   5
-rw-r--r--  arch/powerpc/kvm/44x.c                       |   1
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c               | 110
-rw-r--r--  arch/powerpc/kvm/Kconfig                     |   3
-rw-r--r--  arch/powerpc/kvm/book3s.c                    | 125
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c        |   1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c        |   1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c          |  65
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                 | 337
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c         |   4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c          |   4
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c           |   5
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c                 | 180
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S         |  15
-rw-r--r--  arch/powerpc/kvm/booke.c                     | 310
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c             |  22
-rw-r--r--  arch/powerpc/kvm/e500.h                      |   3
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c                  |  92
-rw-r--r--  arch/powerpc/kvm/powerpc.c                   | 150
-rw-r--r--  arch/powerpc/kvm/trace.h                     | 200
-rw-r--r--  arch/powerpc/platforms/Kconfig               |   1
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c                |   9
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c                |   2
-rw-r--r--  drivers/tty/Kconfig                          |   1
-rw-r--r--  drivers/virt/Kconfig                         |   1
-rw-r--r--  include/linux/kvm.h                          |  10
-rw-r--r--  include/linux/kvm_host.h                     |   1
41 files changed, 1635 insertions(+), 381 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a46a416810fb..4258180b1ecd 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1194,12 +1194,15 @@ struct kvm_ppc_pvinfo {
 This ioctl fetches PV specific information that need to be passed to the guest
 using the device tree or other means from vm context.
 
-For now the only implemented piece of information distributed here is an array
-of 4 instructions that make up a hypercall.
+The hcall array defines 4 instructions that make up a hypercall.
 
 If any additional field gets added to this structure later on, a bit for that
 additional piece of information will be set in the flags bitmap.
 
+The flags bitmap is defined as:
+
+	/* the host supports the ePAPR idle hcall
+	#define KVM_PPC_PVINFO_FLAGS_EV_IDLE	(1<<0)
 
 4.48 KVM_ASSIGN_PCI_DEVICE
 
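A minimal userspace sketch of how a VMM could probe the new flag; KVM_PPC_GET_PVINFO, struct kvm_ppc_pvinfo and the flag bit are part of the KVM uapi, while the wrapper function itself is illustrative:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int host_has_ev_idle(int vm_fd)
	{
		struct kvm_ppc_pvinfo pvinfo;

		if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
			return 0;	/* no PVINFO: assume no idle hcall */
		return !!(pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE);
	}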
@@ -1731,7 +1734,45 @@ registers, find a list below:
   Arch  |       Register        | Width (bits)
         |                       |
   PPC   | KVM_REG_PPC_HIOR      | 64
-
+  PPC   | KVM_REG_PPC_IAC1      | 64
+  PPC   | KVM_REG_PPC_IAC2      | 64
+  PPC   | KVM_REG_PPC_IAC3      | 64
+  PPC   | KVM_REG_PPC_IAC4      | 64
+  PPC   | KVM_REG_PPC_DAC1      | 64
+  PPC   | KVM_REG_PPC_DAC2      | 64
+  PPC   | KVM_REG_PPC_DABR      | 64
+  PPC   | KVM_REG_PPC_DSCR      | 64
+  PPC   | KVM_REG_PPC_PURR      | 64
+  PPC   | KVM_REG_PPC_SPURR     | 64
+  PPC   | KVM_REG_PPC_DAR       | 64
+  PPC   | KVM_REG_PPC_DSISR     | 32
+  PPC   | KVM_REG_PPC_AMR       | 64
+  PPC   | KVM_REG_PPC_UAMOR     | 64
+  PPC   | KVM_REG_PPC_MMCR0     | 64
+  PPC   | KVM_REG_PPC_MMCR1     | 64
+  PPC   | KVM_REG_PPC_MMCRA     | 64
+  PPC   | KVM_REG_PPC_PMC1      | 32
+  PPC   | KVM_REG_PPC_PMC2      | 32
+  PPC   | KVM_REG_PPC_PMC3      | 32
+  PPC   | KVM_REG_PPC_PMC4      | 32
+  PPC   | KVM_REG_PPC_PMC5      | 32
+  PPC   | KVM_REG_PPC_PMC6      | 32
+  PPC   | KVM_REG_PPC_PMC7      | 32
+  PPC   | KVM_REG_PPC_PMC8      | 32
+  PPC   | KVM_REG_PPC_FPR0      | 64
+          ...
+  PPC   | KVM_REG_PPC_FPR31     | 64
+  PPC   | KVM_REG_PPC_VR0       | 128
+          ...
+  PPC   | KVM_REG_PPC_VR31      | 128
+  PPC   | KVM_REG_PPC_VSR0      | 128
+          ...
+  PPC   | KVM_REG_PPC_VSR31     | 128
+  PPC   | KVM_REG_PPC_FPSCR     | 64
+  PPC   | KVM_REG_PPC_VSCR      | 32
+  PPC   | KVM_REG_PPC_VPA_ADDR  | 64
+  PPC   | KVM_REG_PPC_VPA_SLB   | 128
+  PPC   | KVM_REG_PPC_VPA_DTL   | 128
 
 4.69 KVM_GET_ONE_REG
 
@@ -1747,7 +1788,7 @@ kvm_one_reg struct passed in. On success, the register value can be found
 at the memory location pointed to by "addr".
 
 The list of registers accessible using this interface is identical to the
-list in 4.64.
+list in 4.68.
 
 
 4.70 KVM_KVMCLOCK_CTRL
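The ONE_REG calls above take a struct kvm_one_reg holding the register id and a userspace address the value is copied to or from. A sketch of a read, assuming the uapi headers and an open vcpu fd; the helper name is illustrative:

	#include <linux/kvm.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	static int get_fpscr(int vcpu_fd, uint64_t *out)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_FPSCR,	/* 64-bit, per the table above */
			.addr = (uintptr_t)out,		/* kernel writes the value here */
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}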
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 7e313f1ed183..13d6b7bf3b69 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -34,5 +34,6 @@ header-y += termios.h
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h
+header-y += epapr_hcalls.h
 
 generic-y += rwsem.h
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index bf2c06c33871..b8d94459a929 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -50,10 +50,6 @@
 #ifndef _EPAPR_HCALLS_H
 #define _EPAPR_HCALLS_H
 
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <asm/byteorder.h>
-
 #define EV_BYTE_CHANNEL_SEND		1
 #define EV_BYTE_CHANNEL_RECEIVE		2
 #define EV_BYTE_CHANNEL_POLL		3
@@ -88,7 +84,8 @@
 #define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
 #define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
 
-/* epapr error codes */
+/* epapr return codes */
+#define EV_SUCCESS		0
 #define EV_EPERM		1	/* Operation not permitted */
 #define EV_ENOENT		2	/* Entry Not Found */
 #define EV_EIO			3	/* I/O error occured */
@@ -108,6 +105,11 @@
 #define EV_UNIMPLEMENTED	12	/* Unimplemented hypercall */
 #define EV_BUFFER_OVERFLOW	13	/* Caller-supplied buffer too small */
 
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
 /*
  * Hypercall register clobber list
  *
@@ -193,7 +195,7 @@ static inline unsigned int ev_int_set_config(unsigned int interrupt,
 	r5 = priority;
 	r6 = destination;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
 		: : EV_HCALL_CLOBBERS4
 	);
@@ -222,7 +224,7 @@ static inline unsigned int ev_int_get_config(unsigned int interrupt,
 	r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
 	r3 = interrupt;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
 		: : EV_HCALL_CLOBBERS4
 	);
@@ -252,7 +254,7 @@ static inline unsigned int ev_int_set_mask(unsigned int interrupt,
 	r3 = interrupt;
 	r4 = mask;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -277,7 +279,7 @@ static inline unsigned int ev_int_get_mask(unsigned int interrupt,
 	r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
 	r3 = interrupt;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "=r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -305,7 +307,7 @@ static inline unsigned int ev_int_eoi(unsigned int interrupt)
 	r11 = EV_HCALL_TOKEN(EV_INT_EOI);
 	r3 = interrupt;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -344,7 +346,7 @@ static inline unsigned int ev_byte_channel_send(unsigned int handle,
 	r7 = be32_to_cpu(p[2]);
 	r8 = be32_to_cpu(p[3]);
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3),
 		  "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
 		: : EV_HCALL_CLOBBERS6
@@ -383,7 +385,7 @@ static inline unsigned int ev_byte_channel_receive(unsigned int handle,
 	r3 = handle;
 	r4 = *count;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4),
 		  "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
 		: : EV_HCALL_CLOBBERS6
@@ -421,7 +423,7 @@ static inline unsigned int ev_byte_channel_poll(unsigned int handle,
 	r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
 	r3 = handle;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
 		: : EV_HCALL_CLOBBERS3
 	);
@@ -454,7 +456,7 @@ static inline unsigned int ev_int_iack(unsigned int handle,
 	r11 = EV_HCALL_TOKEN(EV_INT_IACK);
 	r3 = handle;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "=r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -478,7 +480,7 @@ static inline unsigned int ev_doorbell_send(unsigned int handle)
 	r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
 	r3 = handle;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -498,12 +500,12 @@ static inline unsigned int ev_idle(void)
 
 	r11 = EV_HCALL_TOKEN(EV_IDLE);
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "=r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
 
 	return r3;
 }
-
+#endif /* !__ASSEMBLY__ */
 #endif
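Across this header and fsl_hcalls.h the mechanical change is identical: rather than issuing "sc 1" inline, every wrapper branches to epapr_hypercall_start, a stub that epapr_paravirt_init() patches at boot with the hypercall sequence the hypervisor publishes, so one kernel binary works under different hypervisors. A caller-side sketch using the wrapper and return codes defined above; the function around them is illustrative:

	/* Illustrative: mask an interrupt and map the ePAPR status code. */
	static int mask_interrupt(unsigned int irq)
	{
		unsigned int rc = ev_int_set_mask(irq, 1);

		return (rc == EV_SUCCESS) ? 0 : -EIO;
	}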
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
index 922d9b5fe3d5..3abb58394da4 100644
--- a/arch/powerpc/include/asm/fsl_hcalls.h
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -96,7 +96,7 @@ static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
 	r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
 	r3 = vcpu_mask;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -151,7 +151,7 @@ static inline unsigned int fh_partition_get_dtprop(int handle,
 	r9 = (uint32_t)propvalue_addr;
 	r10 = *propvalue_len;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11),
 		  "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
 		  "+r" (r8), "+r" (r9), "+r" (r10)
@@ -205,7 +205,7 @@ static inline unsigned int fh_partition_set_dtprop(int handle,
 	r9 = (uint32_t)propvalue_addr;
 	r10 = propvalue_len;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11),
 		  "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
 		  "+r" (r8), "+r" (r9), "+r" (r10)
@@ -229,7 +229,7 @@ static inline unsigned int fh_partition_restart(unsigned int partition)
 	r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
 	r3 = partition;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -262,7 +262,7 @@ static inline unsigned int fh_partition_get_status(unsigned int partition,
 	r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
 	r3 = partition;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "=r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -295,7 +295,7 @@ static inline unsigned int fh_partition_start(unsigned int partition,
 	r4 = entry_point;
 	r5 = load;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
 		: : EV_HCALL_CLOBBERS3
 	);
@@ -317,7 +317,7 @@ static inline unsigned int fh_partition_stop(unsigned int partition)
 	r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
 	r3 = partition;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -376,7 +376,7 @@ static inline unsigned int fh_partition_memcpy(unsigned int source,
 #endif
 	r7 = count;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11),
 		  "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
 		: : EV_HCALL_CLOBBERS5
@@ -399,7 +399,7 @@ static inline unsigned int fh_dma_enable(unsigned int liodn)
 	r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
 	r3 = liodn;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -421,7 +421,7 @@ static inline unsigned int fh_dma_disable(unsigned int liodn)
 	r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
 	r3 = liodn;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -447,7 +447,7 @@ static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
 	r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
 	r3 = interrupt;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "=r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -469,7 +469,7 @@ static inline unsigned int fh_system_reset(void)
 
 	r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "=r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -506,7 +506,7 @@ static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
 	r6 = addr_lo;
 	r7 = peek;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
 		  "+r" (r7)
 		: : EV_HCALL_CLOBBERS5
@@ -542,7 +542,7 @@ static inline unsigned int fh_get_core_state(unsigned int handle,
 	r3 = handle;
 	r4 = vcpu;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -572,7 +572,7 @@ static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
 	r3 = handle;
 	r4 = vcpu;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -597,7 +597,7 @@ static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
 	r3 = handle;
 	r4 = vcpu;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3), "+r" (r4)
 		: : EV_HCALL_CLOBBERS2
 	);
@@ -618,7 +618,7 @@ static inline unsigned int fh_claim_device(unsigned int handle)
 	r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
 	r3 = handle;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
@@ -645,7 +645,7 @@ static inline unsigned int fh_partition_stop_dma(unsigned int handle)
 	r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
 	r3 = handle;
 
-	__asm__ __volatile__ ("sc 1"
+	asm volatile("bl	epapr_hypercall_start"
 		: "+r" (r11), "+r" (r3)
 		: : EV_HCALL_CLOBBERS1
 	);
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 1bea4d8ea6f4..b89ae4db45ce 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -221,6 +221,12 @@ struct kvm_sregs {
 
 	__u32 dbsr;	/* KVM_SREGS_E_UPDATE_DBSR */
 	__u32 dbcr[3];
+	/*
+	 * iac/dac registers are 64bit wide, while this API
+	 * interface provides only lower 32 bits on 64 bit
+	 * processors. ONE_REG interface is added for 64bit
+	 * iac/dac registers.
+	 */
 	__u32 iac[4];
 	__u32 dac[2];
 	__u32 dvc[2];
@@ -326,5 +332,58 @@ struct kvm_book3e_206_tlb_params {
 };
 
 #define KVM_REG_PPC_HIOR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
+#define KVM_REG_PPC_IAC1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_PPC_IAC2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_PPC_IAC3	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4)
+#define KVM_REG_PPC_IAC4	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5)
+#define KVM_REG_PPC_DAC1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6)
+#define KVM_REG_PPC_DAC2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7)
+#define KVM_REG_PPC_DABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8)
+#define KVM_REG_PPC_DSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9)
+#define KVM_REG_PPC_PURR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa)
+#define KVM_REG_PPC_SPURR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb)
+#define KVM_REG_PPC_DAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc)
+#define KVM_REG_PPC_DSISR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd)
+#define KVM_REG_PPC_AMR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe)
+#define KVM_REG_PPC_UAMOR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf)
+
+#define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
+#define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
+#define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+
+#define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
+#define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
+#define KVM_REG_PPC_PMC3	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a)
+#define KVM_REG_PPC_PMC4	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b)
+#define KVM_REG_PPC_PMC5	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c)
+#define KVM_REG_PPC_PMC6	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d)
+#define KVM_REG_PPC_PMC7	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e)
+#define KVM_REG_PPC_PMC8	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f)
+
+/* 32 floating-point registers */
+#define KVM_REG_PPC_FPR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20)
+#define KVM_REG_PPC_FPR(n)	(KVM_REG_PPC_FPR0 + (n))
+#define KVM_REG_PPC_FPR31	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f)
+
+/* 32 VMX/Altivec vector registers */
+#define KVM_REG_PPC_VR0		(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40)
+#define KVM_REG_PPC_VR(n)	(KVM_REG_PPC_VR0 + (n))
+#define KVM_REG_PPC_VR31	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f)
+
+/* 32 double-width FP registers for VSX */
+/* High-order halves overlap with FP regs */
+#define KVM_REG_PPC_VSR0	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60)
+#define KVM_REG_PPC_VSR(n)	(KVM_REG_PPC_VSR0 + (n))
+#define KVM_REG_PPC_VSR31	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f)
+
+/* FP and vector status/control registers */
+#define KVM_REG_PPC_FPSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+#define KVM_REG_PPC_VSCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
+
+/* Virtual processor areas */
+/* For SLB & DTL, address in high (first) half, length in low half */
+#define KVM_REG_PPC_VPA_ADDR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82)
+#define KVM_REG_PPC_VPA_SLB	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
+#define KVM_REG_PPC_VPA_DTL	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
 
 #endif /* __LINUX_KVM_POWERPC_H */
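Each id packs KVM_REG_PPC, a size field, and a register index into one 64-bit constant; the size field is what one_reg_size() in kvm_ppc.h (further down in this merge) shifts back out to get the transfer width. A worked sketch, assuming the generic KVM_REG_SIZE_MASK/KVM_REG_SIZE_SHIFT definitions from include/linux/kvm.h:

	uint64_t id   = KVM_REG_PPC_FPR(5);	/* == KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x25 */
	size_t   size = 1ul << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT);
	/* size == 8 for U64 registers; U128 ids such as the VRs decode to 16 */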
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f0e0c6a66d97..ab738005d2ea 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -160,7 +160,7 @@ extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 			long pte_index, unsigned long pteh, unsigned long ptel);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
-			struct kvm_memory_slot *memslot);
+			struct kvm_memory_slot *memslot, unsigned long *map);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index 30a600fa1b6a..a37a12a9a7d7 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -38,9 +38,9 @@
 #ifdef CONFIG_KVM_BOOKE_HV
 BEGIN_FTR_SECTION
 	mtocrf	0x80, r11	/* check MSR[GS] without clobbering reg */
-	bf	3, kvmppc_resume_\intno\()_\srr1
+	bf	3, 1975f
 	b	kvmppc_handler_\intno\()_\srr1
-kvmppc_resume_\intno\()_\srr1:
+1975:
 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 #endif
 .endm
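The named label would collide if DO_KVM were expanded twice in one translation unit; 1975 is a GNU as local label, and the "f" suffix in "1975f" resolves to the nearest following definition, so each expansion branches to its own copy. A standalone sketch of the idiom (the branch target name is illustrative):

	bf	3, 1975f	/* forward reference to the local label */
	b	some_handler
1975:				/* may be redefined by later expansions */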
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 28e8f5e5c63e..68f5a308737a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -46,7 +46,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#if !defined(CONFIG_KVM_440)
 #include <linux/mmu_notifier.h>
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -204,7 +204,7 @@ struct revmap_entry {
 };
 
 /*
- * We use the top bit of each memslot->rmap entry as a lock bit,
+ * We use the top bit of each memslot->arch.rmap entry as a lock bit,
  * and bit 32 as a present flag. The bottom 32 bits are the
  * index in the guest HPT of a HPTE that points to the page.
  */
@@ -215,14 +215,17 @@ struct revmap_entry {
 #define KVMPPC_RMAP_PRESENT	0x100000000ul
 #define KVMPPC_RMAP_INDEX	0xfffffffful
 
-/* Low-order bits in kvm->arch.slot_phys[][] */
+/* Low-order bits in memslot->arch.slot_phys[] */
 #define KVMPPC_PAGE_ORDER_MASK	0x1f
 #define KVMPPC_PAGE_NO_CACHE	HPTE_R_I	/* 0x20 */
 #define KVMPPC_PAGE_WRITETHRU	HPTE_R_W	/* 0x40 */
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
 	unsigned long *rmap;
+	unsigned long *slot_phys;
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
 };
 
 struct kvm_arch {
@@ -246,8 +249,6 @@ struct kvm_arch {
 	unsigned long hpt_npte;
 	unsigned long hpt_mask;
 	spinlock_t slot_phys_lock;
-	unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
-	int slot_npages[KVM_MEM_SLOTS_NUM];
 	unsigned short last_vcpu[NR_CPUS];
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	struct kvmppc_linear_info *hpt_li;
@@ -346,6 +347,27 @@ struct kvmppc_slb {
 	bool class	: 1;
 };
 
+# ifdef CONFIG_PPC_FSL_BOOK3E
+#define KVMPPC_BOOKE_IAC_NUM	2
+#define KVMPPC_BOOKE_DAC_NUM	2
+# else
+#define KVMPPC_BOOKE_IAC_NUM	4
+#define KVMPPC_BOOKE_DAC_NUM	2
+# endif
+#define KVMPPC_BOOKE_MAX_IAC	4
+#define KVMPPC_BOOKE_MAX_DAC	2
+
+struct kvmppc_booke_debug_reg {
+	u32 dbcr0;
+	u32 dbcr1;
+	u32 dbcr2;
+#ifdef CONFIG_KVM_E500MC
+	u32 dbcr4;
+#endif
+	u64 iac[KVMPPC_BOOKE_MAX_IAC];
+	u64 dac[KVMPPC_BOOKE_MAX_DAC];
+};
+
 struct kvm_vcpu_arch {
 	ulong host_stack;
 	u32 host_pid;
@@ -440,8 +462,6 @@ struct kvm_vcpu_arch {
 
 	u32 ccr0;
 	u32 ccr1;
-	u32 dbcr0;
-	u32 dbcr1;
 	u32 dbsr;
 
 	u64 mmcr[3];
@@ -471,9 +491,12 @@ struct kvm_vcpu_arch {
 	ulong fault_esr;
 	ulong queued_dear;
 	ulong queued_esr;
+	spinlock_t wdt_lock;
+	struct timer_list wdt_timer;
 	u32 tlbcfg[4];
 	u32 mmucfg;
 	u32 epr;
+	struct kvmppc_booke_debug_reg dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -486,6 +509,7 @@ struct kvm_vcpu_arch {
 	u8 osi_needed;
 	u8 osi_enabled;
 	u8 papr_enabled;
+	u8 watchdog_enabled;
 	u8 sane;
 	u8 cpu_type;
 	u8 hcall_needed;
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index c18916bff689..a168ce37d85c 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -75,9 +75,10 @@ struct kvm_vcpu_arch_shared {
 };
 
 #define KVM_SC_MAGIC_R0		0x4b564d21 /* "KVM!" */
-#define HC_VENDOR_KVM		(42 << 16)
-#define HC_EV_SUCCESS		0
-#define HC_EV_UNIMPLEMENTED	12
+
+#define KVM_HCALL_TOKEN(num)	_EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
+
+#include <asm/epapr_hcalls.h>
 
 #define KVM_FEATURE_MAGIC_PAGE	1
 
@@ -121,7 +122,7 @@ static unsigned long kvm_hypercall(unsigned long *in,
 				   unsigned long *out,
 				   unsigned long nr)
 {
-	return HC_EV_UNIMPLEMENTED;
+	return EV_UNIMPLEMENTED;
 }
 
 #endif
@@ -132,7 +133,7 @@ static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
 	unsigned long out[8];
 	unsigned long r;
 
-	r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+	r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
 	*r2 = out[0];
 
 	return r;
@@ -143,7 +144,7 @@ static inline long kvm_hypercall0(unsigned int nr)
 	unsigned long in[8];
 	unsigned long out[8];
 
-	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
 }
 
 static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
@@ -152,7 +153,7 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 	unsigned long out[8];
 
 	in[0] = p1;
-	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
 }
 
 static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
@@ -163,7 +164,7 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
 
 	in[0] = p1;
 	in[1] = p2;
-	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
 }
 
 static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
@@ -175,7 +176,7 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
 	in[0] = p1;
 	in[1] = p2;
 	in[2] = p3;
-	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
 }
 
 static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
@@ -189,7 +190,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 	in[1] = p2;
 	in[2] = p3;
 	in[3] = p4;
-	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
 }
 
 
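The new spelling is bit-for-bit compatible with the old one: _EV_HCALL_TOKEN(id, num) is ((id) << 16) | (num), and the KVM vendor id being 42 is an assumption here, though it is consistent with the removed HC_VENDOR_KVM value of 42 << 16. A compile-time check like this sketch would therefore pass:

	/* Illustrative equivalence check; OLD_TOKEN mimics the removed encoding. */
	#define OLD_TOKEN(nr)	((nr) | (42 << 16))
	_Static_assert(KVM_HCALL_TOKEN(7) == OLD_TOKEN(7),
		       "new token encoding matches nr | HC_VENDOR_KVM");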
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e006f0bdea95..609cca3e9426 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <linux/kvm_host.h>
+#include <linux/bug.h>
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/kvm_book3s.h>
 #else
@@ -68,6 +69,8 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
 extern void kvmppc_decrementer_func(unsigned long data);
 extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
+extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 /* Core-specific hooks */
 
@@ -104,6 +107,7 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				       struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 					 struct kvm_interrupt *irq);
+extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				  unsigned int op, int *advance);
@@ -111,6 +115,7 @@ extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
 				     ulong val);
 extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
 				     ulong *val);
+extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);
 extern void kvmppc_booke_exit(void);
@@ -139,16 +144,26 @@ extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
 extern void kvm_release_hpt(struct kvmppc_linear_info *li);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+				     struct kvm_memory_slot *dont);
+extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
 				struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem);
+				struct kvm_userspace_memory_region *mem,
+				struct kvm_memory_slot old);
 extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
 				      struct kvm_ppc_smmu_info *info);
+extern void kvmppc_core_flush_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot);
 
 extern int kvmppc_bookehv_init(void);
 extern void kvmppc_bookehv_exit(void);
 
+extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -182,6 +197,41 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
 	return r;
 }
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+#define one_reg_size(id)	\
+	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+#define get_reg_val(id, reg)	({		\
+	union kvmppc_one_reg __u;		\
+	switch (one_reg_size(id)) {		\
+	case 4: __u.wval = (reg); break;	\
+	case 8: __u.dval = (reg); break;	\
+	default: BUG();				\
+	}					\
+	__u;					\
+})
+
+
+#define set_reg_val(id, val)	({		\
+	u64 __v;				\
+	switch (one_reg_size(id)) {		\
+	case 4: __v = (val).wval; break;	\
+	case 8: __v = (val).dval; break;	\
+	default: BUG();				\
+	}					\
+	__v;					\
+})
+
 void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
@@ -190,6 +240,8 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
 
 void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
@@ -230,5 +282,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 	}
 }
 
+/* Please call after prepare_to_enter. This function puts the lazy ee state
+   back to normal mode, without actually enabling interrupts. */
+static inline void kvmppc_lazy_ee_enable(void)
+{
+#ifdef CONFIG_PPC64
+	/* Only need to enable IRQs by hard enabling them after this */
+	local_paca->irq_happened = 0;
+	local_paca->soft_enabled = 1;
+#endif
+}
 
 #endif /* __POWERPC_KVM_PPC_H__ */
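A sketch of how a core backend is expected to use the helpers above when servicing a ONE_REG access; the function name is illustrative, but the pattern (pack via get_reg_val(), return -EINVAL so the generic dispatcher in book3s.c below can try its own fallback list) mirrors this merge:

	static int example_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
				       union kvmppc_one_reg *val)
	{
		switch (id) {
		case KVM_REG_PPC_DSISR:	/* declared U32, so wval is selected */
			*val = get_reg_val(id, vcpu->arch.shared->dsisr);
			return 0;
		default:
			return -EINVAL;	/* generic fallback handles the rest */
		}
	}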
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2d916c4982c5..e07e6af5e1ff 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -539,6 +539,13 @@
 #define TCR_FIE		0x00800000	/* FIT Interrupt Enable */
 #define TCR_ARE		0x00400000	/* Auto Reload Enable */
 
+#ifdef CONFIG_E500
+#define TCR_GET_WP(tcr)  ((((tcr) & 0xC0000000) >> 30) | \
+			      (((tcr) & 0x1E0000) >> 15))
+#else
+#define TCR_GET_WP(tcr)  (((tcr) & 0xC0000000) >> 30)
+#endif
+
 /* Bit definitions for the TSR. */
 #define TSR_ENW		0x80000000	/* Enable Next Watchdog */
 #define TSR_WIS		0x40000000	/* WDT Interrupt Status */
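On e500 the 4-bit WPEXT field (mask 0x1E0000) extends the architected 2-bit WP field (mask 0xC0000000), and the macro splices both into one 6-bit watchdog period. A worked example with an illustrative TCR value:

	u32 tcr = 0x400A0000;
	/* WP    = (0x400A0000 & 0xC0000000) >> 30 = 1
	 * WPEXT = (0x400A0000 & 0x001E0000) >> 15 = 0x14
	 * TCR_GET_WP(tcr) = 0x14 | 1 = 0x15, i.e. period index 21 on E500 */
	u32 wp = TCR_GET_WP(tcr);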
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 697b390ebfd8..62c0dc237826 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -8,13 +8,41 @@
  */
 
 #include <linux/threads.h>
+#include <asm/epapr_hcalls.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
 
+/* epapr_ev_idle() was derived from e500_idle() */
+_GLOBAL(epapr_ev_idle)
+	CURRENT_THREAD_INFO(r3, r1)
+	PPC_LL	r4, TI_LOCAL_FLAGS(r3)	/* set napping bit */
+	ori	r4, r4,_TLF_NAPPING	/* so when we take an exception */
+	PPC_STL	r4, TI_LOCAL_FLAGS(r3)	/* it will return to our caller */
+
+	wrteei	1
+
+idle_loop:
+	LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
+
+.global epapr_ev_idle_start
+epapr_ev_idle_start:
+	li	r3, -1
+	nop
+	nop
+	nop
+
+	/*
+	 * Guard against spurious wakeups from a hypervisor --
+	 * only interrupt will cause us to return to LR due to
+	 * _TLF_NAPPING.
+	 */
+	b	idle_loop
+
 /* Hypercall entry point. Will be patched with device tree instructions. */
 .global epapr_hypercall_start
 epapr_hypercall_start:
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 028aeae370b6..f3eab8594d9f 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -21,6 +21,10 @@
 #include <asm/epapr_hcalls.h>
 #include <asm/cacheflush.h>
 #include <asm/code-patching.h>
+#include <asm/machdep.h>
+
+extern void epapr_ev_idle(void);
+extern u32 epapr_ev_idle_start[];
 
 bool epapr_paravirt_enabled;
 
@@ -41,8 +45,13 @@ static int __init epapr_paravirt_init(void)
 	if (len % 4 || len > (4 * 4))
 		return -ENODEV;
 
-	for (i = 0; i < (len / 4); i++)
+	for (i = 0; i < (len / 4); i++) {
 		patch_instruction(epapr_hypercall_start + i, insts[i]);
+		patch_instruction(epapr_ev_idle_start + i, insts[i]);
+	}
+
+	if (of_get_property(hyper_node, "has-idle", NULL))
+		ppc_md.power_save = epapr_ev_idle;
 
 	epapr_paravirt_enabled = true;
 
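The properties read here live in the ePAPR hypervisor node of the device tree. An illustrative node is shown below; the compatible string follows the ePAPR convention, and the instruction words (here the common "sc 1" / "blr" encodings) are whatever the hypervisor actually publishes:

	hypervisor {
		compatible = "epapr,hypervisor-1";
		/* copied over the epapr_hypercall_start and
		   epapr_ev_idle_start placeholders at boot */
		hcall-instructions = <0x44000022 0x4e800020 0x60000000 0x60000000>;
		has-idle;	/* presence selects epapr_ev_idle as power_save */
	};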
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 867db1de8949..a61b133c4f99 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -419,7 +419,7 @@ static void kvm_map_magic_page(void *data)
 	in[0] = KVM_MAGIC_PAGE;
 	in[1] = KVM_MAGIC_PAGE;
 
-	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
+	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
 
 	*features = out[0];
 }
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 3e4031581c65..e597dde124e8 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -43,6 +43,7 @@
 #include <asm/dcr.h>
 #include <asm/ftrace.h>
 #include <asm/switch_to.h>
+#include <asm/epapr_hcalls.h>
 
 #ifdef CONFIG_PPC32
 extern void transfer_to_handler(void);
@@ -192,3 +193,7 @@ EXPORT_SYMBOL(__arch_hweight64);
 #ifdef CONFIG_PPC_BOOK3S_64
 EXPORT_SYMBOL_GPL(mmu_psize_defs);
 #endif
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+EXPORT_SYMBOL(epapr_hypercall_start);
+#endif
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 50e7dbc7356c..3d7fd21c65f9 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -83,6 +83,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu_44x->shadow_refs[i].gtlb_index = -1;
 
 	vcpu->arch.cpu_type = KVM_CPU_440;
+	vcpu->arch.pvr = mfspr(SPRN_PVR);
 
 	return 0;
 }
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index c8c61578fdfc..1a793c4c4a67 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -27,12 +27,68 @@
 #include "booke.h"
 #include "44x_tlb.h"
 
+#define XOP_MFDCRX  259
 #define XOP_MFDCR   323
+#define XOP_MTDCRX  387
 #define XOP_MTDCR   451
 #define XOP_TLBSX   914
 #define XOP_ICCCI   966
 #define XOP_TLBWE   978
 
+static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn)
+{
+	/* emulate some access in kernel */
+	switch (dcrn) {
+	case DCRN_CPR0_CONFIG_ADDR:
+		vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
+		return EMULATE_DONE;
+	default:
+		vcpu->run->dcr.dcrn = dcrn;
+		vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs);
+		vcpu->run->dcr.is_write = 1;
+		vcpu->arch.dcr_needed = 1;
+		kvmppc_account_exit(vcpu, DCR_EXITS);
+		return EMULATE_DO_DCR;
+	}
+}
+
+static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
+{
+	/* The guest may access CPR0 registers to determine the timebase
+	 * frequency, and it must know the real host frequency because it
+	 * can directly access the timebase registers.
+	 *
+	 * It would be possible to emulate those accesses in userspace,
+	 * but userspace can really only figure out the end frequency.
+	 * We could decompose that into the factors that compute it, but
+	 * that's tricky math, and it's easier to just report the real
+	 * CPR0 values.
+	 */
+	switch (dcrn) {
+	case DCRN_CPR0_CONFIG_ADDR:
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
+		break;
+	case DCRN_CPR0_CONFIG_DATA:
+		local_irq_disable();
+		mtdcr(DCRN_CPR0_CONFIG_ADDR,
+			  vcpu->arch.cpr0_cfgaddr);
+		kvmppc_set_gpr(vcpu, rt,
+			       mfdcr(DCRN_CPR0_CONFIG_DATA));
+		local_irq_enable();
+		break;
+	default:
+		vcpu->run->dcr.dcrn = dcrn;
+		vcpu->run->dcr.data = 0;
+		vcpu->run->dcr.is_write = 0;
+		vcpu->arch.io_gpr = rt;
+		vcpu->arch.dcr_needed = 1;
+		kvmppc_account_exit(vcpu, DCR_EXITS);
+		return EMULATE_DO_DCR;
+	}
+
+	return EMULATE_DONE;
+}
+
 int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
@@ -50,55 +106,21 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	switch (get_xop(inst)) {
 
 	case XOP_MFDCR:
-		/* The guest may access CPR0 registers to determine the timebase
-		 * frequency, and it must know the real host frequency because it
-		 * can directly access the timebase registers.
-		 *
-		 * It would be possible to emulate those accesses in userspace,
-		 * but userspace can really only figure out the end frequency.
-		 * We could decompose that into the factors that compute it, but
-		 * that's tricky math, and it's easier to just report the real
-		 * CPR0 values.
-		 */
-		switch (dcrn) {
-		case DCRN_CPR0_CONFIG_ADDR:
-			kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
-			break;
-		case DCRN_CPR0_CONFIG_DATA:
-			local_irq_disable();
-			mtdcr(DCRN_CPR0_CONFIG_ADDR,
-				  vcpu->arch.cpr0_cfgaddr);
-			kvmppc_set_gpr(vcpu, rt,
-				       mfdcr(DCRN_CPR0_CONFIG_DATA));
-			local_irq_enable();
-			break;
-		default:
-			run->dcr.dcrn = dcrn;
-			run->dcr.data = 0;
-			run->dcr.is_write = 0;
-			vcpu->arch.io_gpr = rt;
-			vcpu->arch.dcr_needed = 1;
-			kvmppc_account_exit(vcpu, DCR_EXITS);
-			emulated = EMULATE_DO_DCR;
-		}
+		emulated = emulate_mfdcr(vcpu, rt, dcrn);
+		break;
 
+	case XOP_MFDCRX:
+		emulated = emulate_mfdcr(vcpu, rt,
+					 kvmppc_get_gpr(vcpu, ra));
 		break;
 
 	case XOP_MTDCR:
-		/* emulate some access in kernel */
-		switch (dcrn) {
-		case DCRN_CPR0_CONFIG_ADDR:
-			vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
-			break;
-		default:
-			run->dcr.dcrn = dcrn;
-			run->dcr.data = kvmppc_get_gpr(vcpu, rs);
-			run->dcr.is_write = 1;
-			vcpu->arch.dcr_needed = 1;
-			kvmppc_account_exit(vcpu, DCR_EXITS);
-			emulated = EMULATE_DO_DCR;
-		}
+		emulated = emulate_mtdcr(vcpu, rs, dcrn);
+		break;
 
+	case XOP_MTDCRX:
+		emulated = emulate_mtdcr(vcpu, rs,
+					 kvmppc_get_gpr(vcpu, ra));
 		break;
 
 	case XOP_TLBWE:
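mfdcrx/mtdcrx differ from mfdcr/mtdcr only in where the DCR number comes from: the run-time value of GPR[ra] rather than a field of the instruction, which is why the new cases pass kvmppc_get_gpr(vcpu, ra) as dcrn. A sketch of the X-form field extraction these handlers rely on; the helper names are illustrative stand-ins for the in-tree get_xop()/get_rt()/get_ra() macros:

	static inline int xop(u32 inst) { return (inst >> 1) & 0x3ff; }	/* bits 21..30 */
	static inline int rt(u32 inst)  { return (inst >> 21) & 0x1f; }
	static inline int ra(u32 inst)  { return (inst >> 16) & 0x1f; }
	/* mfdcr  rt,DCRN : dcrn encoded in the instruction itself
	 * mfdcrx rt,ra   : dcrn = GPR[ra], fetched when the trap is emulated */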
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f4dacb9c57fa..71f0cd9edf33 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM_BOOK3S_64_HANDLER
 config KVM_BOOK3S_PR
 	bool
 	select KVM_MMIO
+	select MMU_NOTIFIER
 
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
@@ -123,6 +124,7 @@ config KVM_E500V2
 	depends on EXPERIMENTAL && E500 && !PPC_E500MC
 	select KVM
 	select KVM_MMIO
+	select MMU_NOTIFIER
 	---help---
 	  Support running unmodified E500 guest kernels in virtual machines on
 	  E500v2 host processors.
@@ -138,6 +140,7 @@ config KVM_E500MC
 	select KVM
 	select KVM_MMIO
 	select KVM_BOOKE_HV
+	select MMU_NOTIFIER
 	---help---
 	  Support running unmodified E500MC/E5500 (32-bit) guest kernels in
 	  virtual machines on E500MC/E5500 host processors.
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3f2a8360c857..a4b645285240 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -411,6 +411,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
@@ -476,6 +485,122 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
476 return -ENOTSUPP; 485 return -ENOTSUPP;
477} 486}
478 487
488int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
489{
490 int r;
491 union kvmppc_one_reg val;
492 int size;
493 long int i;
494
495 size = one_reg_size(reg->id);
496 if (size > sizeof(val))
497 return -EINVAL;
498
499 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
500
501 if (r == -EINVAL) {
502 r = 0;
503 switch (reg->id) {
504 case KVM_REG_PPC_DAR:
505 val = get_reg_val(reg->id, vcpu->arch.shared->dar);
506 break;
507 case KVM_REG_PPC_DSISR:
508 val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
509 break;
510 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
511 i = reg->id - KVM_REG_PPC_FPR0;
512 val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
513 break;
514 case KVM_REG_PPC_FPSCR:
515 val = get_reg_val(reg->id, vcpu->arch.fpscr);
516 break;
517#ifdef CONFIG_ALTIVEC
518 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
519 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
520 r = -ENXIO;
521 break;
522 }
523 val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
524 break;
525 case KVM_REG_PPC_VSCR:
526 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
527 r = -ENXIO;
528 break;
529 }
530 val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
531 break;
532#endif /* CONFIG_ALTIVEC */
533 default:
534 r = -EINVAL;
535 break;
536 }
537 }
538 if (r)
539 return r;
540
541 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
542 r = -EFAULT;
543
544 return r;
545}
546
547int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
548{
549 int r;
550 union kvmppc_one_reg val;
551 int size;
552 long int i;
553
554 size = one_reg_size(reg->id);
555 if (size > sizeof(val))
556 return -EINVAL;
557
558 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
559 return -EFAULT;
560
561 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
562
563 if (r == -EINVAL) {
564 r = 0;
565 switch (reg->id) {
566 case KVM_REG_PPC_DAR:
567 vcpu->arch.shared->dar = set_reg_val(reg->id, val);
568 break;
569 case KVM_REG_PPC_DSISR:
570 vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
571 break;
572 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
573 i = reg->id - KVM_REG_PPC_FPR0;
574 vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
575 break;
576 case KVM_REG_PPC_FPSCR:
577 vcpu->arch.fpscr = set_reg_val(reg->id, val);
578 break;
579#ifdef CONFIG_ALTIVEC
580 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
581 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
582 r = -ENXIO;
583 break;
584 }
585 vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
586 break;
587 case KVM_REG_PPC_VSCR:
588 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
589 r = -ENXIO;
590 break;
591 }
592 vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
593 break;
594#endif /* CONFIG_ALTIVEC */
595 default:
596 r = -EINVAL;
597 break;
598 }
599 }
600
601 return r;
602}
603
479int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 604int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
480 struct kvm_translation *tr) 605 struct kvm_translation *tr)
481{ 606{
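[Editor's note: the two handlers added above are the kernel side of the ONE_REG interface; userspace passes a register id and a pointer, and the kernel copies at most one_reg_size(id) bytes in the appropriate direction. A minimal userspace sketch, assuming vcpu_fd is an open KVM vcpu file descriptor (error handling elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read the guest FPSCR through the new ONE_REG path. */
	static uint64_t read_fpscr(int vcpu_fd)
	{
		uint64_t v = 0;
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_FPSCR,
			.addr = (uint64_t)(uintptr_t)&v,  /* kernel writes the value here */
		};

		ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);  /* check the return value in real code */
		return v;
	}
]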
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 837f13e7b6bf..9fac0101ffb9 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -254,6 +254,7 @@ next_pteg:
 
 	kvmppc_mmu_hpte_cache_map(vcpu, pte);
 
+	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
 out:
 	return r;
 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0688b6b39585..6b2c80e49681 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -168,6 +168,7 @@ map_again:
 
 	kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}
+	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
 
 out:
 	return r;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d95d11322a15..7a4aae99ac5b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/hugetlb.h>
 #include <linux/vmalloc.h>
+#include <linux/srcu.h>
 
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
@@ -260,7 +261,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 
 /*
  * This is called to get a reference to a guest page if there isn't
- * one already in the kvm->arch.slot_phys[][] arrays.
+ * one already in the memslot->arch.slot_phys[] array.
  */
 static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 				  struct kvm_memory_slot *memslot,
@@ -275,7 +276,7 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	struct vm_area_struct *vma;
 	unsigned long pfn, i, npages;
 
-	physp = kvm->arch.slot_phys[memslot->id];
+	physp = memslot->arch.slot_phys;
 	if (!physp)
 		return -EINVAL;
 	if (physp[gfn - memslot->base_gfn])
@@ -570,7 +571,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long *hptep, hpte[3], r;
 	unsigned long mmu_seq, psize, pte_size;
-	unsigned long gfn, hva, pfn;
+	unsigned long gpa, gfn, hva, pfn;
 	struct kvm_memory_slot *memslot;
 	unsigned long *rmap;
 	struct revmap_entry *rev;
@@ -608,15 +609,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	/* Translate the logical address and get the page */
 	psize = hpte_page_size(hpte[0], r);
-	gfn = hpte_rpn(r, psize);
+	gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
+	gfn = gpa >> PAGE_SHIFT;
 	memslot = gfn_to_memslot(kvm, gfn);
 
 	/* No memslot means it's an emulated MMIO region */
-	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
-		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
+	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
 					      dsisr & DSISR_ISSTORE);
-	}
 
 	if (!kvm->arch.using_mmu_notifiers)
 		return -EFAULT;		/* should never get here */
@@ -850,7 +850,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		psize = hpte_page_size(hptep[0], ptel);
 		if ((hptep[0] & HPTE_V_VALID) &&
 		    hpte_rpn(ptel, psize) == gfn) {
-			hptep[0] |= HPTE_V_ABSENT;
+			if (kvm->arch.using_mmu_notifiers)
+				hptep[0] |= HPTE_V_ABSENT;
 			kvmppc_invalidate_hpte(kvm, hptep, i);
 			/* Harvest R and C */
 			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
@@ -877,6 +878,28 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 	return 0;
 }
 
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	unsigned long *rmapp;
+	unsigned long gfn;
+	unsigned long n;
+
+	rmapp = memslot->arch.rmap;
+	gfn = memslot->base_gfn;
+	for (n = memslot->npages; n; --n) {
+		/*
+		 * Testing the present bit without locking is OK because
+		 * the memslot has been marked invalid already, and hence
+		 * no new HPTEs referencing this page can be created,
+		 * thus the present bit can't go from 0 to 1.
+		 */
+		if (*rmapp & KVMPPC_RMAP_PRESENT)
+			kvm_unmap_rmapp(kvm, rmapp, gfn);
+		++rmapp;
+		++gfn;
+	}
+}
+
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			 unsigned long gfn)
 {
@@ -1030,16 +1053,16 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
 	return ret;
 }
 
-long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     unsigned long *map)
 {
 	unsigned long i;
-	unsigned long *rmapp, *map;
+	unsigned long *rmapp;
 
 	preempt_disable();
 	rmapp = memslot->arch.rmap;
-	map = memslot->dirty_bitmap;
 	for (i = 0; i < memslot->npages; ++i) {
-		if (kvm_test_clear_dirty(kvm, rmapp))
+		if (kvm_test_clear_dirty(kvm, rmapp) && map)
 			__set_bit_le(i, map);
 		++rmapp;
 	}
@@ -1057,20 +1080,22 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	unsigned long hva, psize, offset;
 	unsigned long pa;
 	unsigned long *physp;
+	int srcu_idx;
 
+	srcu_idx = srcu_read_lock(&kvm->srcu);
 	memslot = gfn_to_memslot(kvm, gfn);
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
-		return NULL;
+		goto err;
 	if (!kvm->arch.using_mmu_notifiers) {
-		physp = kvm->arch.slot_phys[memslot->id];
+		physp = memslot->arch.slot_phys;
 		if (!physp)
-			return NULL;
+			goto err;
 		physp += gfn - memslot->base_gfn;
 		pa = *physp;
 		if (!pa) {
 			if (kvmppc_get_guest_page(kvm, gfn, memslot,
 						  PAGE_SIZE) < 0)
-				return NULL;
+				goto err;
 			pa = *physp;
 		}
 		page = pfn_to_page(pa >> PAGE_SHIFT);
@@ -1079,9 +1104,11 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 		hva = gfn_to_hva_memslot(memslot, gfn);
 		npages = get_user_pages_fast(hva, 1, 1, pages);
 		if (npages < 1)
-			return NULL;
+			goto err;
 		page = pages[0];
 	}
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
 	psize = PAGE_SIZE;
 	if (PageHuge(page)) {
 		page = compound_head(page);
@@ -1091,6 +1118,10 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	if (nb_ret)
 		*nb_ret = psize - offset;
 	return page_address(page) + offset;
+
+ err:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return NULL;
 }
 
 void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
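[Editor's note: a recurring pattern in this file, and again in book3s_hv.c below, is taking the SRCU read lock around memslot lookups, since the memslot array is now protected by kvm->srcu on the read side. The idiom the hunks above introduce, in sketch form:

	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* pin the current memslot array */
	memslot = gfn_to_memslot(kvm, gfn);	/* safe to dereference until unlock */
	/* ... fault handling / translation that uses memslot ... */
	srcu_read_unlock(&kvm->srcu, idx);	/* let memslot updates proceed */
]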
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 83e929e66f9d..9a15da76e56b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -30,6 +30,7 @@
 #include <linux/cpumask.h>
 #include <linux/spinlock.h>
 #include <linux/page-flags.h>
+#include <linux/srcu.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -142,6 +143,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
 	vpa->yield_count = 1;
 }
 
+static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
+		   unsigned long addr, unsigned long len)
+{
+	/* check address is cacheline aligned */
+	if (addr & (L1_CACHE_BYTES - 1))
+		return -EINVAL;
+	spin_lock(&vcpu->arch.vpa_update_lock);
+	if (v->next_gpa != addr || v->len != len) {
+		v->next_gpa = addr;
+		v->len = addr ? len : 0;
+		v->update_pending = 1;
+	}
+	spin_unlock(&vcpu->arch.vpa_update_lock);
+	return 0;
+}
+
 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
 struct reg_vpa {
 	u32 dummy;
@@ -320,7 +337,8 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
 		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
-		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
+		if (vcpu->arch.vpa.pinned_addr)
+			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
 		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
@@ -366,13 +384,16 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
 	unsigned long target, ret = H_SUCCESS;
 	struct kvm_vcpu *tvcpu;
+	int idx;
 
 	switch (req) {
 	case H_ENTER:
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
 					      kvmppc_get_gpr(vcpu, 5),
 					      kvmppc_get_gpr(vcpu, 6),
 					      kvmppc_get_gpr(vcpu, 7));
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	case H_CEDE:
 		break;
@@ -411,6 +432,7 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			      struct task_struct *tsk)
 {
 	int r = RESUME_HOST;
+	int srcu_idx;
 
 	vcpu->stat.sum_exits++;
 
@@ -470,12 +492,16 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * have been handled already.
 	 */
 	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvmppc_book3s_hv_page_fault(run, vcpu,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 		break;
 	case BOOK3S_INTERRUPT_H_INST_STORAGE:
+		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvmppc_book3s_hv_page_fault(run, vcpu,
 				kvmppc_get_pc(vcpu), 0);
+		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 		break;
 	/*
 	 * This occurs if the guest executes an illegal instruction.
@@ -535,36 +561,175 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 {
-	int r = -EINVAL;
+	int r = 0;
+	long int i;
 
-	switch (reg->id) {
+	switch (id) {
 	case KVM_REG_PPC_HIOR:
-		r = put_user(0, (u64 __user *)reg->addr);
+		*val = get_reg_val(id, 0);
+		break;
+	case KVM_REG_PPC_DABR:
+		*val = get_reg_val(id, vcpu->arch.dabr);
+		break;
+	case KVM_REG_PPC_DSCR:
+		*val = get_reg_val(id, vcpu->arch.dscr);
+		break;
+	case KVM_REG_PPC_PURR:
+		*val = get_reg_val(id, vcpu->arch.purr);
+		break;
+	case KVM_REG_PPC_SPURR:
+		*val = get_reg_val(id, vcpu->arch.spurr);
+		break;
+	case KVM_REG_PPC_AMR:
+		*val = get_reg_val(id, vcpu->arch.amr);
+		break;
+	case KVM_REG_PPC_UAMOR:
+		*val = get_reg_val(id, vcpu->arch.uamor);
+		break;
+	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+		i = id - KVM_REG_PPC_MMCR0;
+		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
+		break;
+	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
+		i = id - KVM_REG_PPC_PMC1;
+		*val = get_reg_val(id, vcpu->arch.pmc[i]);
+		break;
+#ifdef CONFIG_VSX
+	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
+		if (cpu_has_feature(CPU_FTR_VSX)) {
+			/* VSX => FP reg i is stored in arch.vsr[2*i] */
+			long int i = id - KVM_REG_PPC_FPR0;
+			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
+		} else {
+			/* let generic code handle it */
+			r = -EINVAL;
+		}
+		break;
+	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+		if (cpu_has_feature(CPU_FTR_VSX)) {
+			long int i = id - KVM_REG_PPC_VSR0;
+			val->vsxval[0] = vcpu->arch.vsr[2 * i];
+			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
+		} else {
+			r = -ENXIO;
+		}
+		break;
+#endif /* CONFIG_VSX */
+	case KVM_REG_PPC_VPA_ADDR:
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		break;
+	case KVM_REG_PPC_VPA_SLB:
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
+		val->vpaval.length = vcpu->arch.slb_shadow.len;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		break;
+	case KVM_REG_PPC_VPA_DTL:
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
+		val->vpaval.length = vcpu->arch.dtl.len;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
 		break;
 	default:
+		r = -EINVAL;
 		break;
 	}
 
 	return r;
 }
 
-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 {
-	int r = -EINVAL;
+	int r = 0;
+	long int i;
+	unsigned long addr, len;
 
-	switch (reg->id) {
+	switch (id) {
 	case KVM_REG_PPC_HIOR:
-	{
-		u64 hior;
 		/* Only allow this to be set to zero */
-		r = get_user(hior, (u64 __user *)reg->addr);
-		if (!r && (hior != 0))
+		if (set_reg_val(id, *val))
 			r = -EINVAL;
 		break;
-	}
+	case KVM_REG_PPC_DABR:
+		vcpu->arch.dabr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_DSCR:
+		vcpu->arch.dscr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_PURR:
+		vcpu->arch.purr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SPURR:
+		vcpu->arch.spurr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_AMR:
+		vcpu->arch.amr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_UAMOR:
+		vcpu->arch.uamor = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+		i = id - KVM_REG_PPC_MMCR0;
+		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
+		i = id - KVM_REG_PPC_PMC1;
+		vcpu->arch.pmc[i] = set_reg_val(id, *val);
+		break;
+#ifdef CONFIG_VSX
+	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
+		if (cpu_has_feature(CPU_FTR_VSX)) {
+			/* VSX => FP reg i is stored in arch.vsr[2*i] */
+			long int i = id - KVM_REG_PPC_FPR0;
+			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
+		} else {
+			/* let generic code handle it */
+			r = -EINVAL;
+		}
+		break;
+	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+		if (cpu_has_feature(CPU_FTR_VSX)) {
+			long int i = id - KVM_REG_PPC_VSR0;
+			vcpu->arch.vsr[2 * i] = val->vsxval[0];
+			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
+		} else {
+			r = -ENXIO;
+		}
+		break;
+#endif /* CONFIG_VSX */
+	case KVM_REG_PPC_VPA_ADDR:
+		addr = set_reg_val(id, *val);
+		r = -EINVAL;
+		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
+			      vcpu->arch.dtl.next_gpa))
+			break;
+		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
+		break;
+	case KVM_REG_PPC_VPA_SLB:
+		addr = val->vpaval.addr;
+		len = val->vpaval.length;
+		r = -EINVAL;
+		if (addr && !vcpu->arch.vpa.next_gpa)
+			break;
+		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
+		break;
+	case KVM_REG_PPC_VPA_DTL:
+		addr = val->vpaval.addr;
+		len = val->vpaval.length;
+		r = -EINVAL;
+		if (len < sizeof(struct dtl_entry))
+			break;
+		if (addr && !vcpu->arch.vpa.next_gpa)
+			break;
+		len -= len % sizeof(struct dtl_entry);
+		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
+		break;
 	default:
+		r = -EINVAL;
 		break;
 	}
 
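[Editor's note: the set cases above enforce an ordering on the per-vCPU areas: the SLB shadow buffer and dispatch trace log can only be registered while a VPA is registered, and the VPA can only be unregistered once both are gone. A userspace caller therefore registers in this order; kvm_set_one_reg() below is a hypothetical wrapper around the KVM_SET_ONE_REG ioctl:

	/* Register the VPA first; the DTL length is rounded down by the
	 * kernel to a multiple of sizeof(struct dtl_entry). */
	kvm_set_one_reg(vcpu_fd, KVM_REG_PPC_VPA_ADDR, &vpa_gpa);
	kvm_set_one_reg(vcpu_fd, KVM_REG_PPC_VPA_DTL,  &dtl_addr_and_len);
	/* ... and tears down in the reverse order: */
	kvm_set_one_reg(vcpu_fd, KVM_REG_PPC_VPA_DTL,  &zero_pair);
	kvm_set_one_reg(vcpu_fd, KVM_REG_PPC_VPA_ADDR, &zero);
]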
@@ -697,17 +862,11 @@ extern void xics_wake_cpu(int cpu);
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 				   struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *v;
-
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 	--vc->n_runnable;
 	++vc->n_busy;
-	/* decrement the physical thread id of each following vcpu */
-	v = vcpu;
-	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
-		--v->arch.ptid;
 	list_del(&vcpu->arch.run_list);
 }
 
@@ -820,6 +979,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	long ret;
 	u64 now;
 	int ptid, i, need_vpa_update;
+	int srcu_idx;
 
 	/* don't start if any threads have a signal pending */
 	need_vpa_update = 0;
@@ -898,6 +1058,9 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	spin_unlock(&vc->lock);
 
 	kvm_guest_enter();
+
+	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
+
 	__kvmppc_vcore_entry(NULL, vcpu0);
 	for (i = 0; i < threads_per_core; ++i)
 		kvmppc_release_hwthread(vc->pcpu + i);
@@ -913,6 +1076,8 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->vcore_state = VCORE_EXITING;
 	spin_unlock(&vc->lock);
 
+	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
+
 	/* make sure updates to secondary vcpu structs are visible now */
 	smp_mb();
 	kvm_guest_exit();
@@ -1273,7 +1438,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	n = kvm_dirty_bitmap_bytes(memslot);
 	memset(memslot->dirty_bitmap, 0, n);
 
-	r = kvmppc_hv_get_dirty_log(kvm, memslot);
+	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
 	if (r)
 		goto out;
 
@@ -1299,53 +1464,86 @@ static unsigned long slb_pgsize_encoding(unsigned long psize)
 	return senc;
 }
 
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem)
+static void unpin_slot(struct kvm_memory_slot *memslot)
 {
-	unsigned long npages;
-	unsigned long *phys;
+	unsigned long *physp;
+	unsigned long j, npages, pfn;
+	struct page *page;
 
-	/* Allocate a slot_phys array */
-	phys = kvm->arch.slot_phys[mem->slot];
-	if (!kvm->arch.using_mmu_notifiers && !phys) {
-		npages = mem->memory_size >> PAGE_SHIFT;
-		phys = vzalloc(npages * sizeof(unsigned long));
-		if (!phys)
-			return -ENOMEM;
-		kvm->arch.slot_phys[mem->slot] = phys;
-		kvm->arch.slot_npages[mem->slot] = npages;
+	physp = memslot->arch.slot_phys;
+	npages = memslot->npages;
+	if (!physp)
+		return;
+	for (j = 0; j < npages; j++) {
+		if (!(physp[j] & KVMPPC_GOT_PAGE))
+			continue;
+		pfn = physp[j] >> PAGE_SHIFT;
+		page = pfn_to_page(pfn);
+		SetPageDirty(page);
+		put_page(page);
 	}
+}
+
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+	if (!dont || free->arch.rmap != dont->arch.rmap) {
+		vfree(free->arch.rmap);
+		free->arch.rmap = NULL;
+	}
+	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
+		unpin_slot(free);
+		vfree(free->arch.slot_phys);
+		free->arch.slot_phys = NULL;
+	}
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+	if (!slot->arch.rmap)
+		return -ENOMEM;
+	slot->arch.slot_phys = NULL;
 
 	return 0;
 }
 
-static void unpin_slot(struct kvm *kvm, int slot_id)
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot,
+				      struct kvm_userspace_memory_region *mem)
 {
-	unsigned long *physp;
-	unsigned long j, npages, pfn;
-	struct page *page;
+	unsigned long *phys;
 
-	physp = kvm->arch.slot_phys[slot_id];
-	npages = kvm->arch.slot_npages[slot_id];
-	if (physp) {
-		spin_lock(&kvm->arch.slot_phys_lock);
-		for (j = 0; j < npages; j++) {
-			if (!(physp[j] & KVMPPC_GOT_PAGE))
-				continue;
-			pfn = physp[j] >> PAGE_SHIFT;
-			page = pfn_to_page(pfn);
-			SetPageDirty(page);
-			put_page(page);
-		}
-		kvm->arch.slot_phys[slot_id] = NULL;
-		spin_unlock(&kvm->arch.slot_phys_lock);
-		vfree(physp);
+	/* Allocate a slot_phys array if needed */
+	phys = memslot->arch.slot_phys;
+	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
+		phys = vzalloc(memslot->npages * sizeof(unsigned long));
+		if (!phys)
+			return -ENOMEM;
+		memslot->arch.slot_phys = phys;
 	}
+
+	return 0;
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem)
+				      struct kvm_userspace_memory_region *mem,
+				      struct kvm_memory_slot old)
 {
+	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+	struct kvm_memory_slot *memslot;
+
+	if (npages && old.npages) {
+		/*
+		 * If modifying a memslot, reset all the rmap dirty bits.
+		 * If this is a new memslot, we don't need to do anything
+		 * since the rmap array starts out as all zeroes,
+		 * i.e. no pages are dirty.
+		 */
+		memslot = id_to_memslot(kvm->memslots, mem->slot);
+		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
+	}
 }
 
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
@@ -1362,6 +1560,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	unsigned long rmls;
 	unsigned long *physp;
 	unsigned long i, npages;
+	int srcu_idx;
 
 	mutex_lock(&kvm->lock);
 	if (kvm->arch.rma_setup_done)
@@ -1377,12 +1576,13 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	}
 
 	/* Look up the memslot for guest physical address 0 */
+	srcu_idx = srcu_read_lock(&kvm->srcu);
 	memslot = gfn_to_memslot(kvm, 0);
 
 	/* We must have some memory at 0 by now */
 	err = -EINVAL;
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
-		goto out;
+		goto out_srcu;
 
 	/* Look up the VMA for the start of this memory slot */
 	hva = memslot->userspace_addr;
@@ -1406,14 +1606,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		err = -EPERM;
 		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
 			pr_err("KVM: CPU requires an RMO\n");
-			goto out;
+			goto out_srcu;
 		}
 
 		/* We can handle 4k, 64k or 16M pages in the VRMA */
 		err = -EINVAL;
 		if (!(psize == 0x1000 || psize == 0x10000 ||
 		      psize == 0x1000000))
-			goto out;
+			goto out_srcu;
 
 		/* Update VRMASD field in the LPCR */
 		senc = slb_pgsize_encoding(psize);
@@ -1436,7 +1636,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		err = -EINVAL;
 		if (rmls < 0) {
 			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
-			goto out;
+			goto out_srcu;
 		}
 		atomic_inc(&ri->use_count);
 		kvm->arch.rma = ri;
@@ -1465,17 +1665,24 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		/* Initialize phys addrs of pages in RMO */
 		npages = ri->npages;
 		porder = __ilog2(npages);
-		physp = kvm->arch.slot_phys[memslot->id];
-		spin_lock(&kvm->arch.slot_phys_lock);
-		for (i = 0; i < npages; ++i)
-			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
-		spin_unlock(&kvm->arch.slot_phys_lock);
+		physp = memslot->arch.slot_phys;
+		if (physp) {
+			if (npages > memslot->npages)
+				npages = memslot->npages;
+			spin_lock(&kvm->arch.slot_phys_lock);
+			for (i = 0; i < npages; ++i)
+				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
+					porder;
+			spin_unlock(&kvm->arch.slot_phys_lock);
+		}
 	}
 
 	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
 	smp_wmb();
 	kvm->arch.rma_setup_done = 1;
 	err = 0;
+ out_srcu:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
 	mutex_unlock(&kvm->lock);
 	return err;
@@ -1528,12 +1735,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
-	unsigned long i;
-
-	if (!kvm->arch.using_mmu_notifiers)
-		for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-			unpin_slot(kvm, i);
-
 	if (kvm->arch.rma) {
 		kvm_release_rma(kvm->arch.rma);
 		kvm->arch.rma = NULL;
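[Editor's note: with slot_phys and rmap moved into memslot->arch, the per-slot arrays now follow the generic memslot lifecycle instead of living in kvm->arch. In outline, using the hook names from the hunks above:

	kvmppc_core_create_memslot()        - allocate arch.rmap; slot_phys starts NULL
	kvmppc_core_prepare_memory_region() - allocate slot_phys when not using MMU notifiers
	kvmppc_core_commit_memory_region()  - on modification, clear the rmap dirty bits
	kvmppc_core_flush_memslot()         - on deletion, unmap HPTEs via the rmap array
	kvmppc_core_free_memslot()          - unpin pages, then vfree rmap and slot_phys
]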
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fb4eac290fef..ec0a9e5de100 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -157,8 +157,8 @@ static void __init kvm_linear_init_one(ulong size, int count, int type)
 	linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
 	for (i = 0; i < count; ++i) {
 		linear = alloc_bootmem_align(size, size);
-		pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
+		pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
 			size >> 20);
 		linear_info[i].base_virt = linear;
 		linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
 		linear_info[i].npages = npages;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index fb0e821622d4..9955216477a4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -81,7 +81,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	ptel = rev->guest_rpte |= rcbits;
 	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
 	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
-	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+	if (!memslot)
 		return;
 
 	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
@@ -183,7 +183,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	rmap = &memslot->arch.rmap[slot_fn];
 
 	if (!kvm->arch.using_mmu_notifiers) {
-		physp = kvm->arch.slot_phys[memslot->id];
+		physp = memslot->arch.slot_phys;
 		if (!physp)
 			return H_PARAMETER;
 		physp += slot_fn;
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 41cb0017e757..2c86b0d63714 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -114,11 +114,6 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 	hlist_del_init_rcu(&pte->list_vpte);
 	hlist_del_init_rcu(&pte->list_vpte_long);
 
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-
 	spin_unlock(&vcpu3s->mmu_lock);
 
 	vcpu3s->hpte_cache_count--;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 05c28f59f77f..b853696b6d8e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -52,8 +52,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
-#define __hard_irq_disable local_irq_disable
-#define __hard_irq_enable local_irq_enable
 #endif
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -66,7 +64,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
 	svcpu_put(svcpu);
 #endif
-
+	vcpu->cpu = smp_processor_id();
 #ifdef CONFIG_PPC_BOOK3S_32
 	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
 #endif
@@ -86,8 +84,64 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	kvmppc_giveup_ext(vcpu, MSR_FP);
 	kvmppc_giveup_ext(vcpu, MSR_VEC);
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
+	vcpu->cpu = -1;
+}
+
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+{
+	int r = 1; /* Indicate we want to get back into the guest */
+
+	/* We misuse TLB_FLUSH to indicate that we want to clear
+	   all shadow cache entries */
+	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+		kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+	return r;
+}
+
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	trace_kvm_unmap_hva(hva);
+
+	/*
+	 * Flush all shadow tlb entries everywhere. This is slow, but
+	 * we are 100% sure that we catch the page that is to be unmapped.
+	 */
+	kvm_flush_remote_tlbs(kvm);
+
+	return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	/* kvm_unmap_hva flushes everything anyway */
+	kvm_unmap_hva(kvm, start);
+
+	return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	/* XXX could be more clever ;) */
+	return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	/* XXX could be more clever ;) */
+	return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	/* The page will get remapped properly on its next fault */
+	kvm_unmap_hva(kvm, hva);
 }
 
+/*****************************************/
+
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
 	ulong smsr = vcpu->arch.shared->msr;
@@ -540,18 +594,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			  unsigned int exit_nr)
 {
 	int r = RESUME_HOST;
+	int s;
 
 	vcpu->stat.sum_exits++;
 
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 
-	/* We get here with MSR.EE=0, so enable it to be a nice citizen */
-	__hard_irq_enable();
+	/* We get here with MSR.EE=1 */
+
+	trace_kvm_exit(exit_nr, vcpu);
+	kvm_guest_exit();
 
-	trace_kvm_book3s_exit(exit_nr, vcpu);
-	preempt_enable();
-	kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 	{
@@ -802,7 +856,6 @@ program_interrupt:
 		}
 	}
 
-	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -814,20 +867,13 @@ program_interrupt:
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		__hard_irq_disable();
-		if (signal_pending(current)) {
-			__hard_irq_enable();
-#ifdef EXIT_DEBUG
-			printk(KERN_EMERG "KVM: Going back to host\n");
-#endif
-			vcpu->stat.signal_exits++;
-			run->exit_reason = KVM_EXIT_INTR;
-			r = -EINTR;
+		local_irq_disable();
+		s = kvmppc_prepare_to_enter(vcpu);
+		if (s <= 0) {
+			local_irq_enable();
+			r = s;
 		} else {
-			/* In case an interrupt came in that was triggered
-			 * from userspace (like DEC), we need to check what
-			 * to inject now! */
-			kvmppc_core_prepare_to_enter(vcpu);
+			kvmppc_lazy_ee_enable();
 		}
 	}
 
@@ -899,34 +945,59 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 {
-	int r = -EINVAL;
+	int r = 0;
 
-	switch (reg->id) {
+	switch (id) {
 	case KVM_REG_PPC_HIOR:
-		r = copy_to_user((u64 __user *)(long)reg->addr,
-				 &to_book3s(vcpu)->hior, sizeof(u64));
+		*val = get_reg_val(id, to_book3s(vcpu)->hior);
 		break;
+#ifdef CONFIG_VSX
+	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
+		long int i = id - KVM_REG_PPC_VSR0;
+
+		if (!cpu_has_feature(CPU_FTR_VSX)) {
+			r = -ENXIO;
+			break;
+		}
+		val->vsxval[0] = vcpu->arch.fpr[i];
+		val->vsxval[1] = vcpu->arch.vsr[i];
+		break;
+	}
+#endif /* CONFIG_VSX */
 	default:
+		r = -EINVAL;
 		break;
 	}
 
 	return r;
 }
 
-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 {
-	int r = -EINVAL;
+	int r = 0;
 
-	switch (reg->id) {
+	switch (id) {
 	case KVM_REG_PPC_HIOR:
-		r = copy_from_user(&to_book3s(vcpu)->hior,
-				   (u64 __user *)(long)reg->addr, sizeof(u64));
-		if (!r)
-			to_book3s(vcpu)->hior_explicit = true;
+		to_book3s(vcpu)->hior = set_reg_val(id, *val);
+		to_book3s(vcpu)->hior_explicit = true;
 		break;
+#ifdef CONFIG_VSX
+	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
+		long int i = id - KVM_REG_PPC_VSR0;
+
+		if (!cpu_has_feature(CPU_FTR_VSX)) {
+			r = -ENXIO;
+			break;
+		}
+		vcpu->arch.fpr[i] = val->vsxval[0];
+		vcpu->arch.vsr[i] = val->vsxval[1];
+		break;
+	}
+#endif /* CONFIG_VSX */
 	default:
+		r = -EINVAL;
 		break;
 	}
 
@@ -1020,8 +1091,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 	ulong ext_msr;
 
-	preempt_disable();
-
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -1029,21 +1098,16 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	kvmppc_core_prepare_to_enter(vcpu);
-
 	/*
 	 * Interrupts could be timers for the guest which we have to inject
 	 * again, so let's postpone them until we're in the guest and if we
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	__hard_irq_disable();
-
-	/* No need to go into the guest when all we do is going out */
-	if (signal_pending(current)) {
-		__hard_irq_enable();
-		kvm_run->exit_reason = KVM_EXIT_INTR;
-		ret = -EINTR;
+	local_irq_disable();
+	ret = kvmppc_prepare_to_enter(vcpu);
+	if (ret <= 0) {
+		local_irq_enable();
 		goto out;
 	}
 
@@ -1080,11 +1144,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-	kvm_guest_enter();
+	kvmppc_lazy_ee_enable();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
-	kvm_guest_exit();
+	/* No need for kvm_guest_exit. It's done in handle_exit.
+	   We also get here with interrupts enabled. */
 
 	current->thread.regs->msr = ext_msr;
 
@@ -1113,7 +1178,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 out:
-	preempt_enable();
+	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
 }
 
@@ -1181,14 +1246,31 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
 }
 #endif /* CONFIG_PPC64 */
 
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	return 0;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot,
 				      struct kvm_userspace_memory_region *mem)
 {
 	return 0;
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem)
+				      struct kvm_userspace_memory_region *mem,
+				      struct kvm_memory_slot old)
+{
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
 
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9ecf6e35cd8d..b2f8258b545a 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -170,20 +170,21 @@ kvmppc_handler_skip_ins:
  * Call kvmppc_handler_trampoline_enter in real mode
  *
  * On entry, r4 contains the guest shadow MSR
+ * MSR.EE has to be 0 when calling this function
  */
 _GLOBAL(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
 
-	li	r9, MSR_RI
-	ori	r9, r9, MSR_EE
-	andc	r9, r5, r9	/* Clear EE and RI in MSR value */
 	li	r6, MSR_IR | MSR_DR
-	ori	r6, r6, MSR_EE
-	andc	r6, r5, r6	/* Clear EE, DR and IR in MSR value */
-	MTMSR_EERI(r9)		/* Clear EE and RI in MSR */
-	mtsrr0	r7		/* before we set srr0/1 */
+	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
+	/*
+	 * Set EE in HOST_MSR so that it's enabled when we get into our
+	 * C exit handler function
+	 */
+	ori	r5, r5, MSR_EE
+	mtsrr0	r7
 	mtsrr1	r6
 	RFI
 
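[Editor's note: this trampoline change is part of the lazy-EE rework visible throughout book3s_pr.c above: the caller must now enter with MSR.EE already 0, so the explicit EE/RI clearing is dropped, only IR/DR are cleared for the hop into real mode, and EE is set in the saved host MSR so that external interrupts come back on automatically once the C exit handler runs.]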
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d25a097c852b..3d1f35dc7862 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -39,6 +39,7 @@
 
 #include "timing.h"
 #include "booke.h"
+#include "trace.h"
 
 unsigned long kvmppc_booke_handlers;
 
@@ -62,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "doorbell", VCPU_STAT(dbell_exits) },
 	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
+	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
 	{ NULL }
 };
 
@@ -120,6 +122,16 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
+	/* We always treat the FP bit as enabled from the host
+	   perspective, so only need to adjust the shadow MSR */
+	vcpu->arch.shadow_msr &= ~MSR_FP;
+	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
+#endif
+}
+
 /*
  * Helper function for "full" MSR writes. No need to call this if only
  * EE/CE/ME/DE/RI are changing.
@@ -136,11 +148,13 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 
 	kvmppc_mmu_msr_notify(vcpu, old_msr);
 	kvmppc_vcpu_sync_spe(vcpu);
+	kvmppc_vcpu_sync_fpu(vcpu);
 }
 
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
 				       unsigned int priority)
 {
+	trace_kvm_booke_queue_irqprio(vcpu, priority);
 	set_bit(priority, &vcpu->arch.pending_exceptions);
 }
 
@@ -206,6 +220,16 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
 }
 
+static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
+}
+
+static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
+{
+	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
+}
+
 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
 {
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -325,6 +349,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 		msr_mask = MSR_CE | MSR_ME | MSR_DE;
 		int_class = INT_CLASS_NONCRIT;
 		break;
+	case BOOKE_IRQPRIO_WATCHDOG:
 	case BOOKE_IRQPRIO_CRITICAL:
 	case BOOKE_IRQPRIO_DBELL_CRIT:
 		allowed = vcpu->arch.shared->msr & MSR_CE;
@@ -404,12 +429,121 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	return allowed;
 }
 
+/*
+ * Return the number of jiffies until the next timeout.  If the timeout is
+ * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
+ * because the larger value can break the timer APIs.
+ */
+static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
+{
+	u64 tb, wdt_tb, wdt_ticks = 0;
+	u64 nr_jiffies = 0;
+	u32 period = TCR_GET_WP(vcpu->arch.tcr);
+
+	wdt_tb = 1ULL << (63 - period);
+	tb = get_tb();
+	/*
+	 * The watchdog timeout will happen when the TB bit corresponding
+	 * to the watchdog toggles from 0 to 1.
+	 */
+	if (tb & wdt_tb)
+		wdt_ticks = wdt_tb;
+
+	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
+
+	/* Convert timebase ticks to jiffies */
+	nr_jiffies = wdt_ticks;
+
+	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
+		nr_jiffies++;
+
+	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
+}
+
+static void arm_next_watchdog(struct kvm_vcpu *vcpu)
+{
+	unsigned long nr_jiffies;
+	unsigned long flags;
+
+	/*
+	 * If TSR_ENW and TSR_WIS are not set then there is no need to exit
+	 * to userspace, so clear the KVM_REQ_WATCHDOG request.
+	 */
+	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
+		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
+
+	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
+	nr_jiffies = watchdog_next_timeout(vcpu);
+	/*
+	 * If the number of jiffies of the watchdog timer >= NEXT_TIMER_MAX_DELTA
+	 * then do not run the watchdog timer as this can break timer APIs.
+	 */
+	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
+		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
+	else
+		del_timer(&vcpu->arch.wdt_timer);
+	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
+}
+
+void kvmppc_watchdog_func(unsigned long data)
+{
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+	u32 tsr, new_tsr;
+	int final;
+
+	do {
+		new_tsr = tsr = vcpu->arch.tsr;
+		final = 0;
+
+		/* Time out event */
+		if (tsr & TSR_ENW) {
+			if (tsr & TSR_WIS)
+				final = 1;
+			else
+				new_tsr = tsr | TSR_WIS;
+		} else {
+			new_tsr = tsr | TSR_ENW;
+		}
+	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
+
+	if (new_tsr & TSR_WIS) {
+		smp_wmb();
+		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+		kvm_vcpu_kick(vcpu);
+	}
+
+	/*
+	 * If this is the final watchdog expiry and some action is required
+	 * then exit to userspace.
+	 */
+	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
+	    vcpu->arch.watchdog_enabled) {
+		smp_wmb();
+		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
+		kvm_vcpu_kick(vcpu);
+	}
+
+	/*
+	 * Stop running the watchdog timer after the final expiration to
+	 * prevent the host from being flooded with timers if the
+	 * guest sets a short period.
+	 * Timers will resume when TSR/TCR is updated next time.
+	 */
+	if (!final)
+		arm_next_watchdog(vcpu);
+}
+
 static void update_timer_ints(struct kvm_vcpu *vcpu)
 {
 	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
 		kvmppc_core_queue_dec(vcpu);
 	else
 		kvmppc_core_dequeue_dec(vcpu);
+
+	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
+		kvmppc_core_queue_watchdog(vcpu);
+	else
+		kvmppc_core_dequeue_watchdog(vcpu);
 }
 
 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
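[Editor's note: a worked example of watchdog_next_timeout() with illustrative numbers. Assume TCR[WP] selects period = 33, so the watched timebase bit is 1ULL << (63 - 33) = 0x40000000, and assume a 500 MHz timebase with HZ = 250, i.e. tb_ticks_per_jiffy = 2,000,000:

	/*
	 *   wdt_tb    = 1ULL << 30              ~= 1.07e9 ticks
	 *   worst case (watched bit currently set):
	 *   wdt_ticks = wdt_tb + (wdt_tb - (tb & (wdt_tb - 1)))
	 *            <= 2 * wdt_tb              ~= 2.15e9 ticks
	 *   nr_jiffies = DIV_ROUND_UP(wdt_ticks, 2000000) ~= 1074 jiffies
	 *              (about 4.3 s at HZ = 250), far below NEXT_TIMER_MAX_DELTA,
	 *              so mod_timer() is used rather than del_timer().
	 */
]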
@@ -417,13 +551,6 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned int priority;
 
-	if (vcpu->requests) {
-		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
-			smp_mb();
-			update_timer_ints(vcpu);
-		}
-	}
-
 	priority = __ffs(*pending);
 	while (priority < BOOKE_IRQPRIO_MAX) {
 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -459,37 +586,20 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	return r;
 }
 
-/*
- * Common checks before entering the guest world. Call with interrupts
- * disabled.
- *
- * returns !0 if a signal is pending and check_signal is true
- */
-static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
-	int r = 0;
-
-	WARN_ON_ONCE(!irqs_disabled());
-	while (true) {
-		if (need_resched()) {
-			local_irq_enable();
-			cond_resched();
-			local_irq_disable();
-			continue;
-		}
-
-		if (signal_pending(current)) {
-			r = 1;
-			break;
-		}
+	int r = 1; /* Indicate we want to get back into the guest */
 
-		if (kvmppc_core_prepare_to_enter(vcpu)) {
-			/* interrupts got enabled in between, so we
-			   are back at square 1 */
-			continue;
-		}
+	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
+		update_timer_ints(vcpu);
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
+	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+		kvmppc_core_flush_tlb(vcpu);
+#endif
 
-		break;
+	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
+		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
+		r = 0;
 	}
 
 	return r;
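[Editor's note: when kvmppc_core_check_requests() returns 0 for a watchdog request, the run loop bails out to userspace with exit_reason set to KVM_EXIT_WATCHDOG (final expiry with a nonzero TCR[WRC]). A sketch of the userspace side, where run is the mmap'ed struct kvm_run for the vcpu and reset_guest() is a hypothetical VMM helper:

	ioctl(vcpu_fd, KVM_RUN, 0);
	switch (run->exit_reason) {
	case KVM_EXIT_WATCHDOG:
		/* Final watchdog expiry: act on TCR[WRC], e.g. reset the VM */
		reset_guest();
		break;
	/* ... other exit reasons ... */
	}
]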
@@ -497,7 +607,7 @@ static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int ret;
+	int ret, s;
 #ifdef CONFIG_PPC_FPU
 	unsigned int fpscr;
 	int fpexc_mode;
@@ -510,11 +620,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 
 	local_irq_disable();
-	if (kvmppc_prepare_to_enter(vcpu)) {
-		kvm_run->exit_reason = KVM_EXIT_INTR;
-		ret = -EINTR;
+	s = kvmppc_prepare_to_enter(vcpu);
+	if (s <= 0) {
+		local_irq_enable();
+		ret = s;
 		goto out;
 	}
+	kvmppc_lazy_ee_enable();
 
 	kvm_guest_enter();
 
@@ -542,6 +654,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
+	/* No need for kvm_guest_exit. It's done in handle_exit.
+	   We also get here with interrupts enabled. */
+
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
 
@@ -557,10 +672,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
-	kvm_guest_exit();
-
 out:
-	local_irq_enable();
+	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
 }
 
@@ -668,6 +781,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
668 unsigned int exit_nr) 781 unsigned int exit_nr)
669{ 782{
670 int r = RESUME_HOST; 783 int r = RESUME_HOST;
784 int s;
671 785
672 /* update before a new last_exit_type is rewritten */ 786 /* update before a new last_exit_type is rewritten */
673 kvmppc_update_timing_stats(vcpu); 787 kvmppc_update_timing_stats(vcpu);
@@ -677,6 +791,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
677 791
678 local_irq_enable(); 792 local_irq_enable();
679 793
794 trace_kvm_exit(exit_nr, vcpu);
795 kvm_guest_exit();
796
680 run->exit_reason = KVM_EXIT_UNKNOWN; 797 run->exit_reason = KVM_EXIT_UNKNOWN;
681 run->ready_for_interrupt_injection = 1; 798 run->ready_for_interrupt_injection = 1;
682 799
@@ -971,10 +1088,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
971 */ 1088 */
972 if (!(r & RESUME_HOST)) { 1089 if (!(r & RESUME_HOST)) {
973 local_irq_disable(); 1090 local_irq_disable();
974 if (kvmppc_prepare_to_enter(vcpu)) { 1091 s = kvmppc_prepare_to_enter(vcpu);
975 run->exit_reason = KVM_EXIT_INTR; 1092 if (s <= 0) {
976 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); 1093 local_irq_enable();
977 kvmppc_account_exit(vcpu, SIGNAL_EXITS); 1094 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
1095 } else {
1096 kvmppc_lazy_ee_enable();
978 } 1097 }
979 } 1098 }
980 1099
@@ -1011,6 +1130,21 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1011 return r; 1130 return r;
1012} 1131}
1013 1132
1133int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1134{
1135 /* setup watchdog timer once */
1136 spin_lock_init(&vcpu->arch.wdt_lock);
1137 setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
1138 (unsigned long)vcpu);
1139
1140 return 0;
1141}
1142
1143void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1144{
1145 del_timer_sync(&vcpu->arch.wdt_timer);
1146}
1147
1014int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1148int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1015{ 1149{
1016 int i; 1150 int i;
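
For the watchdog timer set up two hunks above, the (pre-4.15) kernel timer API works in three steps: setup_timer() binds the callback once, mod_timer() (re)arms it, and del_timer_sync() tears it down and waits for any running handler. A minimal sketch of the arming step that arm_next_watchdog() is expected to perform; its real body, which derives the period from TCR, is not shown in this hunk:

static void arm_watchdog_sketch(struct kvm_vcpu *vcpu, unsigned long delay)
{
        /* mod_timer() works whether or not the timer is already pending */
        mod_timer(&vcpu->arch.wdt_timer, jiffies + delay);
}
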
@@ -1106,7 +1240,13 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
1106 } 1240 }
1107 1241
1108 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) { 1242 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
1243 u32 old_tsr = vcpu->arch.tsr;
1244
1109 vcpu->arch.tsr = sregs->u.e.tsr; 1245 vcpu->arch.tsr = sregs->u.e.tsr;
1246
1247 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1248 arm_next_watchdog(vcpu);
1249
1110 update_timer_ints(vcpu); 1250 update_timer_ints(vcpu);
1111 } 1251 }
1112 1252
@@ -1221,12 +1361,56 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1221 1361
1222int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1362int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1223{ 1363{
1224 return -EINVAL; 1364 int r = -EINVAL;
1365
1366 switch (reg->id) {
1367 case KVM_REG_PPC_IAC1:
1368 case KVM_REG_PPC_IAC2:
1369 case KVM_REG_PPC_IAC3:
1370 case KVM_REG_PPC_IAC4: {
1371 int iac = reg->id - KVM_REG_PPC_IAC1;
1372 r = copy_to_user((u64 __user *)(long)reg->addr,
1373 &vcpu->arch.dbg_reg.iac[iac], sizeof(u64));
1374 break;
1375 }
1376 case KVM_REG_PPC_DAC1:
1377 case KVM_REG_PPC_DAC2: {
1378 int dac = reg->id - KVM_REG_PPC_DAC1;
1379 r = copy_to_user((u64 __user *)(long)reg->addr,
1380 &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
1381 break;
1382 }
1383 default:
1384 break;
1385 }
1386 return r;
1225} 1387}
1226 1388
1227int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1389int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1228{ 1390{
1229 return -EINVAL; 1391 int r = -EINVAL;
1392
1393 switch (reg->id) {
1394 case KVM_REG_PPC_IAC1:
1395 case KVM_REG_PPC_IAC2:
1396 case KVM_REG_PPC_IAC3:
1397 case KVM_REG_PPC_IAC4: {
1398 int iac = reg->id - KVM_REG_PPC_IAC1;
1399 r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
1400 (u64 __user *)(long)reg->addr, sizeof(u64));
1401 break;
1402 }
1403 case KVM_REG_PPC_DAC1:
1404 case KVM_REG_PPC_DAC2: {
1405 int dac = reg->id - KVM_REG_PPC_DAC1;
1406 r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
1407 (u64 __user *)(long)reg->addr, sizeof(u64));
1408 break;
1409 }
1410 default:
1411 break;
1412 }
1413 return r;
1230} 1414}
1231 1415
1232int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1416int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
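
From userspace, the IAC/DAC debug registers exposed above are reached through the generic ONE_REG ioctls. A minimal sketch, assuming vcpu_fd is an open vcpu file descriptor and with error handling trimmed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t get_iac1(int vcpu_fd)
{
        uint64_t val = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_IAC1,
                .addr = (uintptr_t)&val,  /* kernel copy_to_user()s into here */
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        return val;
}
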
@@ -1253,20 +1437,38 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1253 return -ENOTSUPP; 1437 return -ENOTSUPP;
1254} 1438}
1255 1439
1440void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
1441 struct kvm_memory_slot *dont)
1442{
1443}
1444
1445int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
1446 unsigned long npages)
1447{
1448 return 0;
1449}
1450
1256int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1451int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1452 struct kvm_memory_slot *memslot,
1257 struct kvm_userspace_memory_region *mem) 1453 struct kvm_userspace_memory_region *mem)
1258{ 1454{
1259 return 0; 1455 return 0;
1260} 1456}
1261 1457
1262void kvmppc_core_commit_memory_region(struct kvm *kvm, 1458void kvmppc_core_commit_memory_region(struct kvm *kvm,
1263 struct kvm_userspace_memory_region *mem) 1459 struct kvm_userspace_memory_region *mem,
1460 struct kvm_memory_slot old)
1461{
1462}
1463
1464void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
1264{ 1465{
1265} 1466}
1266 1467
1267void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) 1468void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1268{ 1469{
1269 vcpu->arch.tcr = new_tcr; 1470 vcpu->arch.tcr = new_tcr;
1471 arm_next_watchdog(vcpu);
1270 update_timer_ints(vcpu); 1472 update_timer_ints(vcpu);
1271} 1473}
1272 1474
@@ -1281,6 +1483,14 @@ void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1281void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) 1483void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1282{ 1484{
1283 clear_bits(tsr_bits, &vcpu->arch.tsr); 1485 clear_bits(tsr_bits, &vcpu->arch.tsr);
1486
1487 /*
 1488 * We may have stopped the watchdog because it
 1489 * was stuck on its final expiration.
1490 */
1491 if (tsr_bits & (TSR_ENW | TSR_WIS))
1492 arm_next_watchdog(vcpu);
1493
1284 update_timer_ints(vcpu); 1494 update_timer_ints(vcpu);
1285} 1495}
1286 1496
@@ -1298,12 +1508,14 @@ void kvmppc_decrementer_func(unsigned long data)
1298 1508
1299void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1509void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1300{ 1510{
1511 vcpu->cpu = smp_processor_id();
1301 current->thread.kvm_vcpu = vcpu; 1512 current->thread.kvm_vcpu = vcpu;
1302} 1513}
1303 1514
1304void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) 1515void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
1305{ 1516{
1306 current->thread.kvm_vcpu = NULL; 1517 current->thread.kvm_vcpu = NULL;
1518 vcpu->cpu = -1;
1307} 1519}
1308 1520
1309int __init kvmppc_booke_init(void) 1521int __init kvmppc_booke_init(void)
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 12834bb608ab..514790f41aba 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -133,10 +133,10 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
133 vcpu->arch.csrr1 = spr_val; 133 vcpu->arch.csrr1 = spr_val;
134 break; 134 break;
135 case SPRN_DBCR0: 135 case SPRN_DBCR0:
136 vcpu->arch.dbcr0 = spr_val; 136 vcpu->arch.dbg_reg.dbcr0 = spr_val;
137 break; 137 break;
138 case SPRN_DBCR1: 138 case SPRN_DBCR1:
139 vcpu->arch.dbcr1 = spr_val; 139 vcpu->arch.dbg_reg.dbcr1 = spr_val;
140 break; 140 break;
141 case SPRN_DBSR: 141 case SPRN_DBSR:
142 vcpu->arch.dbsr &= ~spr_val; 142 vcpu->arch.dbsr &= ~spr_val;
@@ -145,6 +145,14 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
145 kvmppc_clr_tsr_bits(vcpu, spr_val); 145 kvmppc_clr_tsr_bits(vcpu, spr_val);
146 break; 146 break;
147 case SPRN_TCR: 147 case SPRN_TCR:
148 /*
149 * WRC is a 2-bit field that is supposed to preserve its
150 * value once written to non-zero.
151 */
152 if (vcpu->arch.tcr & TCR_WRC_MASK) {
153 spr_val &= ~TCR_WRC_MASK;
154 spr_val |= vcpu->arch.tcr & TCR_WRC_MASK;
155 }
148 kvmppc_set_tcr(vcpu, spr_val); 156 kvmppc_set_tcr(vcpu, spr_val);
149 break; 157 break;
150 158
@@ -229,6 +237,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
229 case SPRN_IVOR15: 237 case SPRN_IVOR15:
230 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; 238 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
231 break; 239 break;
240 case SPRN_MCSR:
241 vcpu->arch.mcsr &= ~spr_val;
242 break;
232 243
233 default: 244 default:
234 emulated = EMULATE_FAIL; 245 emulated = EMULATE_FAIL;
@@ -258,10 +269,10 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
258 *spr_val = vcpu->arch.csrr1; 269 *spr_val = vcpu->arch.csrr1;
259 break; 270 break;
260 case SPRN_DBCR0: 271 case SPRN_DBCR0:
261 *spr_val = vcpu->arch.dbcr0; 272 *spr_val = vcpu->arch.dbg_reg.dbcr0;
262 break; 273 break;
263 case SPRN_DBCR1: 274 case SPRN_DBCR1:
264 *spr_val = vcpu->arch.dbcr1; 275 *spr_val = vcpu->arch.dbg_reg.dbcr1;
265 break; 276 break;
266 case SPRN_DBSR: 277 case SPRN_DBSR:
267 *spr_val = vcpu->arch.dbsr; 278 *spr_val = vcpu->arch.dbsr;
@@ -321,6 +332,9 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
321 case SPRN_IVOR15: 332 case SPRN_IVOR15:
322 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; 333 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
323 break; 334 break;
335 case SPRN_MCSR:
336 *spr_val = vcpu->arch.mcsr;
337 break;
324 338
325 default: 339 default:
326 emulated = EMULATE_FAIL; 340 emulated = EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index aa8b81428bf4..d1622864549e 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -27,8 +27,7 @@
27#define E500_TLB_NUM 2 27#define E500_TLB_NUM 2
28 28
29#define E500_TLB_VALID 1 29#define E500_TLB_VALID 1
30#define E500_TLB_DIRTY 2 30#define E500_TLB_BITMAP 2
31#define E500_TLB_BITMAP 4
32 31
33struct tlbe_ref { 32struct tlbe_ref {
34 pfn_t pfn; 33 pfn_t pfn;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index ff38b664195d..c73389477d17 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -304,17 +304,13 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
304 ref->flags = E500_TLB_VALID; 304 ref->flags = E500_TLB_VALID;
305 305
306 if (tlbe_is_writable(gtlbe)) 306 if (tlbe_is_writable(gtlbe))
307 ref->flags |= E500_TLB_DIRTY; 307 kvm_set_pfn_dirty(pfn);
308} 308}
309 309
310static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) 310static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
311{ 311{
312 if (ref->flags & E500_TLB_VALID) { 312 if (ref->flags & E500_TLB_VALID) {
313 if (ref->flags & E500_TLB_DIRTY) 313 trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
314 kvm_release_pfn_dirty(ref->pfn);
315 else
316 kvm_release_pfn_clean(ref->pfn);
317
318 ref->flags = 0; 314 ref->flags = 0;
319 } 315 }
320} 316}
@@ -357,6 +353,13 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
357 clear_tlb_privs(vcpu_e500); 353 clear_tlb_privs(vcpu_e500);
358} 354}
359 355
356void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
357{
358 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
359 clear_tlb_refs(vcpu_e500);
360 clear_tlb1_bitmap(vcpu_e500);
361}
362
360static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, 363static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
361 unsigned int eaddr, int as) 364 unsigned int eaddr, int as)
362{ 365{
@@ -541,6 +544,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
541 544
542 /* Clear i-cache for new pages */ 545 /* Clear i-cache for new pages */
543 kvmppc_mmu_flush_icache(pfn); 546 kvmppc_mmu_flush_icache(pfn);
547
548 /* Drop refcount on page, so that mmu notifiers can clear it */
549 kvm_release_pfn_clean(pfn);
544} 550}
545 551
546/* XXX only map the one-one case, for now use TLB0 */ 552/* XXX only map the one-one case, for now use TLB0 */
@@ -1039,8 +1045,12 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
1039 sesel = 0; /* unused */ 1045 sesel = 0; /* unused */
1040 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; 1046 priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
1041 1047
1042 kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, 1048 /* Only triggers after clear_tlb_refs */
1043 &priv->ref, eaddr, &stlbe); 1049 if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
1050 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
1051 else
1052 kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
1053 &priv->ref, eaddr, &stlbe);
1044 break; 1054 break;
1045 1055
1046 case 1: { 1056 case 1: {
@@ -1060,6 +1070,49 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
1060 write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); 1070 write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
1061} 1071}
1062 1072
1073/************* MMU Notifiers *************/
1074
1075int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1076{
1077 trace_kvm_unmap_hva(hva);
1078
1079 /*
1080 * Flush all shadow tlb entries everywhere. This is slow, but
 1081 * we are 100% sure that we catch the to-be-unmapped page
1082 */
1083 kvm_flush_remote_tlbs(kvm);
1084
1085 return 0;
1086}
1087
1088int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
1089{
 1090 /* kvm_unmap_hva flushes everything anyway */
1091 kvm_unmap_hva(kvm, start);
1092
1093 return 0;
1094}
1095
1096int kvm_age_hva(struct kvm *kvm, unsigned long hva)
1097{
1098 /* XXX could be more clever ;) */
1099 return 0;
1100}
1101
1102int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1103{
1104 /* XXX could be more clever ;) */
1105 return 0;
1106}
1107
1108void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1109{
1110 /* The page will get remapped properly on its next fault */
1111 kvm_unmap_hva(kvm, hva);
1112}
1113
1114/*****************************************/
1115
1063static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) 1116static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
1064{ 1117{
1065 int i; 1118 int i;
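
The hooks above are what KVM_ARCH_WANT_MMU_NOTIFIER plugs into. As a rough, from-memory paraphrase of the generic virt/kvm notifier path (not verbatim kernel code), each host-side unmap funnels in like this; note that because this kvm_unmap_hva() flushes remote TLBs itself and returns 0, the generic flush below ends up skipped:

static void invalidate_page_sketch(struct kvm *kvm, unsigned long address)
{
        int need_tlb_flush;

        spin_lock(&kvm->mmu_lock);
        need_tlb_flush = kvm_unmap_hva(kvm, address);
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
        spin_unlock(&kvm->mmu_lock);
}
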
@@ -1081,6 +1134,8 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
1081 } 1134 }
1082 1135
1083 vcpu_e500->num_shared_tlb_pages = 0; 1136 vcpu_e500->num_shared_tlb_pages = 0;
1137
1138 kfree(vcpu_e500->shared_tlb_pages);
1084 vcpu_e500->shared_tlb_pages = NULL; 1139 vcpu_e500->shared_tlb_pages = NULL;
1085 } else { 1140 } else {
1086 kfree(vcpu_e500->gtlb_arch); 1141 kfree(vcpu_e500->gtlb_arch);
@@ -1178,21 +1233,27 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1178 } 1233 }
1179 1234
1180 virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); 1235 virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
1181 if (!virt) 1236 if (!virt) {
1237 ret = -ENOMEM;
1182 goto err_put_page; 1238 goto err_put_page;
1239 }
1183 1240
1184 privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0], 1241 privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
1185 GFP_KERNEL); 1242 GFP_KERNEL);
1186 privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1], 1243 privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
1187 GFP_KERNEL); 1244 GFP_KERNEL);
1188 1245
1189 if (!privs[0] || !privs[1]) 1246 if (!privs[0] || !privs[1]) {
1190 goto err_put_page; 1247 ret = -ENOMEM;
1248 goto err_privs;
1249 }
1191 1250
1192 g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1], 1251 g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
1193 GFP_KERNEL); 1252 GFP_KERNEL);
1194 if (!g2h_bitmap) 1253 if (!g2h_bitmap) {
1195 goto err_put_page; 1254 ret = -ENOMEM;
1255 goto err_privs;
1256 }
1196 1257
1197 free_gtlb(vcpu_e500); 1258 free_gtlb(vcpu_e500);
1198 1259
@@ -1232,10 +1293,11 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1232 kvmppc_recalc_tlb1map_range(vcpu_e500); 1293 kvmppc_recalc_tlb1map_range(vcpu_e500);
1233 return 0; 1294 return 0;
1234 1295
1235err_put_page: 1296err_privs:
1236 kfree(privs[0]); 1297 kfree(privs[0]);
1237 kfree(privs[1]); 1298 kfree(privs[1]);
1238 1299
1300err_put_page:
1239 for (i = 0; i < num_pages; i++) 1301 for (i = 0; i < num_pages; i++)
1240 put_page(pages[i]); 1302 put_page(pages[i]);
1241 1303
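
The relabelling above restores the usual kernel unwind idiom: error labels are ordered so that jumping to a later label releases only what has already been acquired, with each label falling through to the cleanups below it. The shape, using hypothetical helpers purely for illustration:

        if (get_pages())                /* step 1 (hypothetical helper) */
                return -EFAULT;
        ret = -ENOMEM;
        if (alloc_privs())              /* step 2 */
                goto err_put_page;      /* only pages exist yet */
        if (alloc_bitmap())             /* step 3 */
                goto err_privs;         /* privs and pages both exist */
        return 0;

err_privs:
        kfree_privs();
        /* fall through */
err_put_page:
        put_pages();
        return ret;
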
@@ -1332,7 +1394,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
1332 if (!vcpu_e500->gtlb_priv[1]) 1394 if (!vcpu_e500->gtlb_priv[1])
1333 goto err; 1395 goto err;
1334 1396
1335 vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) * 1397 vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
1336 vcpu_e500->gtlb_params[1].entries, 1398 vcpu_e500->gtlb_params[1].entries,
1337 GFP_KERNEL); 1399 GFP_KERNEL);
1338 if (!vcpu_e500->g2h_tlb1_map) 1400 if (!vcpu_e500->g2h_tlb1_map)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 4d213b8b0fb5..deb0d596d815 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
30#include <asm/kvm_ppc.h> 30#include <asm/kvm_ppc.h>
31#include <asm/tlbflush.h> 31#include <asm/tlbflush.h>
32#include <asm/cputhreads.h> 32#include <asm/cputhreads.h>
33#include <asm/irqflags.h>
33#include "timing.h" 34#include "timing.h"
34#include "../mm/mmu_decl.h" 35#include "../mm/mmu_decl.h"
35 36
@@ -38,8 +39,7 @@
38 39
39int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 40int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
40{ 41{
41 return !(v->arch.shared->msr & MSR_WE) || 42 return !!(v->arch.pending_exceptions) ||
42 !!(v->arch.pending_exceptions) ||
43 v->requests; 43 v->requests;
44} 44}
45 45
@@ -48,6 +48,85 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
48 return 1; 48 return 1;
49} 49}
50 50
51#ifndef CONFIG_KVM_BOOK3S_64_HV
52/*
53 * Common checks before entering the guest world. Call with interrupts
54 * disabled.
55 *
56 * returns:
57 *
58 * == 1 if we're ready to go into guest state
59 * <= 0 if we need to go back to the host with return value
60 */
61int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
62{
63 int r = 1;
64
65 WARN_ON_ONCE(!irqs_disabled());
66 while (true) {
67 if (need_resched()) {
68 local_irq_enable();
69 cond_resched();
70 local_irq_disable();
71 continue;
72 }
73
74 if (signal_pending(current)) {
75 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
76 vcpu->run->exit_reason = KVM_EXIT_INTR;
77 r = -EINTR;
78 break;
79 }
80
81 vcpu->mode = IN_GUEST_MODE;
82
83 /*
84 * Reading vcpu->requests must happen after setting vcpu->mode,
85 * so we don't miss a request because the requester sees
86 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
87 * before next entering the guest (and thus doesn't IPI).
88 */
89 smp_mb();
90
91 if (vcpu->requests) {
 92 /* Make sure we process requests preemptibly */
93 local_irq_enable();
94 trace_kvm_check_requests(vcpu);
95 r = kvmppc_core_check_requests(vcpu);
96 local_irq_disable();
97 if (r > 0)
98 continue;
99 break;
100 }
101
102 if (kvmppc_core_prepare_to_enter(vcpu)) {
103 /* interrupts got enabled in between, so we
104 are back at square 1 */
105 continue;
106 }
107
108#ifdef CONFIG_PPC64
109 /* lazy EE magic */
110 hard_irq_disable();
111 if (lazy_irq_pending()) {
112 /* Got an interrupt in between, try again */
113 local_irq_enable();
114 local_irq_disable();
115 kvm_guest_exit();
116 continue;
117 }
118
119 trace_hardirqs_on();
120#endif
121
122 kvm_guest_enter();
123 break;
124 }
125
126 return r;
127}
128#endif /* CONFIG_KVM_BOOK3S_64_HV */
129
51int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 130int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
52{ 131{
53 int nr = kvmppc_get_gpr(vcpu, 11); 132 int nr = kvmppc_get_gpr(vcpu, 11);
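
The IN_GUEST_MODE store plus smp_mb() above pairs with the requester side. A hedged sketch of that pairing; kvm_vcpu_kick() performs an equivalent mode check internally, and it is spelled out here only to make the ordering visible:

static void make_request_sketch(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); /* store the bit */
        smp_mb();                      /* order the store vs. the mode load */
        if (vcpu->mode == IN_GUEST_MODE)
                kvm_vcpu_kick(vcpu);   /* force an exit; otherwise the entry
                                          path above will see the bit */
}
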
@@ -67,18 +146,18 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
67 } 146 }
68 147
69 switch (nr) { 148 switch (nr) {
70 case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE: 149 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
71 { 150 {
72 vcpu->arch.magic_page_pa = param1; 151 vcpu->arch.magic_page_pa = param1;
73 vcpu->arch.magic_page_ea = param2; 152 vcpu->arch.magic_page_ea = param2;
74 153
75 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; 154 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
76 155
77 r = HC_EV_SUCCESS; 156 r = EV_SUCCESS;
78 break; 157 break;
79 } 158 }
80 case HC_VENDOR_KVM | KVM_HC_FEATURES: 159 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
81 r = HC_EV_SUCCESS; 160 r = EV_SUCCESS;
82#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) 161#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
83 /* XXX Missing magic page on 44x */ 162 /* XXX Missing magic page on 44x */
84 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 163 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
@@ -86,8 +165,13 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
86 165
87 /* Second return value is in r4 */ 166 /* Second return value is in r4 */
88 break; 167 break;
168 case EV_HCALL_TOKEN(EV_IDLE):
169 r = EV_SUCCESS;
170 kvm_vcpu_block(vcpu);
171 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
172 break;
89 default: 173 default:
90 r = HC_EV_UNIMPLEMENTED; 174 r = EV_UNIMPLEMENTED;
91 break; 175 break;
92 } 176 }
93 177
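
The tokens dispatched above encode a vendor ID alongside the call number, so ePAPR-standard and KVM-vendor hypercalls can share one switch without colliding. Quoted from memory from this series' epapr_hcalls.h, so treat the exact values as illustrative:

/* vendor ID in the upper 16 bits, hcall number in the lower 16 */
#define _EV_HCALL_TOKEN(id, num)  (((id) << 16) | (num))
#define EV_HCALL_TOKEN(num)       _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, num) /* 1 */
#define KVM_HCALL_TOKEN(num)      _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)  /* 42 */
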
@@ -220,6 +304,7 @@ int kvm_dev_ioctl_check_extension(long ext)
220 switch (ext) { 304 switch (ext) {
221#ifdef CONFIG_BOOKE 305#ifdef CONFIG_BOOKE
222 case KVM_CAP_PPC_BOOKE_SREGS: 306 case KVM_CAP_PPC_BOOKE_SREGS:
307 case KVM_CAP_PPC_BOOKE_WATCHDOG:
223#else 308#else
224 case KVM_CAP_PPC_SEGSTATE: 309 case KVM_CAP_PPC_SEGSTATE:
225 case KVM_CAP_PPC_HIOR: 310 case KVM_CAP_PPC_HIOR:
@@ -260,10 +345,16 @@ int kvm_dev_ioctl_check_extension(long ext)
260 if (cpu_has_feature(CPU_FTR_ARCH_201)) 345 if (cpu_has_feature(CPU_FTR_ARCH_201))
261 r = 2; 346 r = 2;
262 break; 347 break;
348#endif
263 case KVM_CAP_SYNC_MMU: 349 case KVM_CAP_SYNC_MMU:
350#ifdef CONFIG_KVM_BOOK3S_64_HV
264 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; 351 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
265 break; 352#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
353 r = 1;
354#else
355 r = 0;
266#endif 356#endif
357 break;
267 case KVM_CAP_NR_VCPUS: 358 case KVM_CAP_NR_VCPUS:
268 /* 359 /*
269 * Recommending a number of CPUs is somewhat arbitrary; we 360 * Recommending a number of CPUs is somewhat arbitrary; we
@@ -302,19 +393,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
302void kvm_arch_free_memslot(struct kvm_memory_slot *free, 393void kvm_arch_free_memslot(struct kvm_memory_slot *free,
303 struct kvm_memory_slot *dont) 394 struct kvm_memory_slot *dont)
304{ 395{
305 if (!dont || free->arch.rmap != dont->arch.rmap) { 396 kvmppc_core_free_memslot(free, dont);
306 vfree(free->arch.rmap);
307 free->arch.rmap = NULL;
308 }
309} 397}
310 398
311int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) 399int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
312{ 400{
313 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); 401 return kvmppc_core_create_memslot(slot, npages);
314 if (!slot->arch.rmap)
315 return -ENOMEM;
316
317 return 0;
318} 402}
319 403
320int kvm_arch_prepare_memory_region(struct kvm *kvm, 404int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -323,7 +407,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
323 struct kvm_userspace_memory_region *mem, 407 struct kvm_userspace_memory_region *mem,
324 int user_alloc) 408 int user_alloc)
325{ 409{
326 return kvmppc_core_prepare_memory_region(kvm, mem); 410 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
327} 411}
328 412
329void kvm_arch_commit_memory_region(struct kvm *kvm, 413void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -331,7 +415,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
331 struct kvm_memory_slot old, 415 struct kvm_memory_slot old,
332 int user_alloc) 416 int user_alloc)
333{ 417{
334 kvmppc_core_commit_memory_region(kvm, mem); 418 kvmppc_core_commit_memory_region(kvm, mem, old);
335} 419}
336 420
337void kvm_arch_flush_shadow_all(struct kvm *kvm) 421void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -341,6 +425,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
341void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 425void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
342 struct kvm_memory_slot *slot) 426 struct kvm_memory_slot *slot)
343{ 427{
428 kvmppc_core_flush_memslot(kvm, slot);
344} 429}
345 430
346struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 431struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
@@ -390,6 +475,8 @@ enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
390 475
391int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 476int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
392{ 477{
478 int ret;
479
393 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 480 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
394 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); 481 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
395 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; 482 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
@@ -398,13 +485,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
398#ifdef CONFIG_KVM_EXIT_TIMING 485#ifdef CONFIG_KVM_EXIT_TIMING
399 mutex_init(&vcpu->arch.exit_timing_lock); 486 mutex_init(&vcpu->arch.exit_timing_lock);
400#endif 487#endif
401 488 ret = kvmppc_subarch_vcpu_init(vcpu);
402 return 0; 489 return ret;
403} 490}
404 491
405void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 492void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
406{ 493{
407 kvmppc_mmu_destroy(vcpu); 494 kvmppc_mmu_destroy(vcpu);
495 kvmppc_subarch_vcpu_uninit(vcpu);
408} 496}
409 497
410void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 498void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -420,7 +508,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
420 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); 508 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
421#endif 509#endif
422 kvmppc_core_vcpu_load(vcpu, cpu); 510 kvmppc_core_vcpu_load(vcpu, cpu);
423 vcpu->cpu = smp_processor_id();
424} 511}
425 512
426void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 513void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -429,7 +516,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
429#ifdef CONFIG_BOOKE 516#ifdef CONFIG_BOOKE
430 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); 517 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
431#endif 518#endif
432 vcpu->cpu = -1;
433} 519}
434 520
435int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 521int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -649,6 +735,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
649 r = 0; 735 r = 0;
650 vcpu->arch.papr_enabled = true; 736 vcpu->arch.papr_enabled = true;
651 break; 737 break;
738#ifdef CONFIG_BOOKE
739 case KVM_CAP_PPC_BOOKE_WATCHDOG:
740 r = 0;
741 vcpu->arch.watchdog_enabled = true;
742 break;
743#endif
652#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 744#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
653 case KVM_CAP_SW_TLB: { 745 case KVM_CAP_SW_TLB: {
654 struct kvm_config_tlb cfg; 746 struct kvm_config_tlb cfg;
@@ -751,9 +843,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
751 843
752static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) 844static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
753{ 845{
846 u32 inst_nop = 0x60000000;
847#ifdef CONFIG_KVM_BOOKE_HV
848 u32 inst_sc1 = 0x44000022;
849 pvinfo->hcall[0] = inst_sc1;
850 pvinfo->hcall[1] = inst_nop;
851 pvinfo->hcall[2] = inst_nop;
852 pvinfo->hcall[3] = inst_nop;
853#else
754 u32 inst_lis = 0x3c000000; 854 u32 inst_lis = 0x3c000000;
755 u32 inst_ori = 0x60000000; 855 u32 inst_ori = 0x60000000;
756 u32 inst_nop = 0x60000000;
757 u32 inst_sc = 0x44000002; 856 u32 inst_sc = 0x44000002;
758 u32 inst_imm_mask = 0xffff; 857 u32 inst_imm_mask = 0xffff;
759 858
@@ -770,6 +869,9 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
770 pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); 869 pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
771 pvinfo->hcall[2] = inst_sc; 870 pvinfo->hcall[2] = inst_sc;
772 pvinfo->hcall[3] = inst_nop; 871 pvinfo->hcall[3] = inst_nop;
872#endif
873
874 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
773 875
774 return 0; 876 return 0;
775} 877}
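
Userspace consumes this through the KVM_PPC_GET_PVINFO VM ioctl. A minimal sketch, with vm_fd assumed to be an open VM descriptor and error handling trimmed:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void show_pvinfo(int vm_fd)
{
        struct kvm_ppc_pvinfo pv;

        if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pv) < 0)
                return;

        if (pv.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)
                printf("host supports EV_IDLE\n");

        /* four-instruction hypercall template to patch into the guest */
        printf("hcall: %08x %08x %08x %08x\n",
               pv.hcall[0], pv.hcall[1], pv.hcall[2], pv.hcall[3]);
}
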
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 877186b7b1c3..519aba8bb3d3 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -31,6 +31,126 @@ TRACE_EVENT(kvm_ppc_instr,
31 __entry->inst, __entry->pc, __entry->emulate) 31 __entry->inst, __entry->pc, __entry->emulate)
32); 32);
33 33
34#ifdef CONFIG_PPC_BOOK3S
35#define kvm_trace_symbol_exit \
36 {0x100, "SYSTEM_RESET"}, \
37 {0x200, "MACHINE_CHECK"}, \
38 {0x300, "DATA_STORAGE"}, \
39 {0x380, "DATA_SEGMENT"}, \
40 {0x400, "INST_STORAGE"}, \
41 {0x480, "INST_SEGMENT"}, \
42 {0x500, "EXTERNAL"}, \
43 {0x501, "EXTERNAL_LEVEL"}, \
44 {0x502, "EXTERNAL_HV"}, \
45 {0x600, "ALIGNMENT"}, \
46 {0x700, "PROGRAM"}, \
47 {0x800, "FP_UNAVAIL"}, \
48 {0x900, "DECREMENTER"}, \
49 {0x980, "HV_DECREMENTER"}, \
50 {0xc00, "SYSCALL"}, \
51 {0xd00, "TRACE"}, \
52 {0xe00, "H_DATA_STORAGE"}, \
53 {0xe20, "H_INST_STORAGE"}, \
54 {0xe40, "H_EMUL_ASSIST"}, \
55 {0xf00, "PERFMON"}, \
56 {0xf20, "ALTIVEC"}, \
57 {0xf40, "VSX"}
58#else
59#define kvm_trace_symbol_exit \
60 {0, "CRITICAL"}, \
61 {1, "MACHINE_CHECK"}, \
62 {2, "DATA_STORAGE"}, \
63 {3, "INST_STORAGE"}, \
64 {4, "EXTERNAL"}, \
65 {5, "ALIGNMENT"}, \
66 {6, "PROGRAM"}, \
67 {7, "FP_UNAVAIL"}, \
68 {8, "SYSCALL"}, \
69 {9, "AP_UNAVAIL"}, \
70 {10, "DECREMENTER"}, \
71 {11, "FIT"}, \
72 {12, "WATCHDOG"}, \
73 {13, "DTLB_MISS"}, \
74 {14, "ITLB_MISS"}, \
75 {15, "DEBUG"}, \
76 {32, "SPE_UNAVAIL"}, \
77 {33, "SPE_FP_DATA"}, \
78 {34, "SPE_FP_ROUND"}, \
79 {35, "PERFORMANCE_MONITOR"}, \
80 {36, "DOORBELL"}, \
81 {37, "DOORBELL_CRITICAL"}, \
82 {38, "GUEST_DBELL"}, \
83 {39, "GUEST_DBELL_CRIT"}, \
84 {40, "HV_SYSCALL"}, \
85 {41, "HV_PRIV"}
86#endif
87
88TRACE_EVENT(kvm_exit,
89 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
90 TP_ARGS(exit_nr, vcpu),
91
92 TP_STRUCT__entry(
93 __field( unsigned int, exit_nr )
94 __field( unsigned long, pc )
95 __field( unsigned long, msr )
96 __field( unsigned long, dar )
97#ifdef CONFIG_KVM_BOOK3S_PR
98 __field( unsigned long, srr1 )
99#endif
100 __field( unsigned long, last_inst )
101 ),
102
103 TP_fast_assign(
104#ifdef CONFIG_KVM_BOOK3S_PR
105 struct kvmppc_book3s_shadow_vcpu *svcpu;
106#endif
107 __entry->exit_nr = exit_nr;
108 __entry->pc = kvmppc_get_pc(vcpu);
109 __entry->dar = kvmppc_get_fault_dar(vcpu);
110 __entry->msr = vcpu->arch.shared->msr;
111#ifdef CONFIG_KVM_BOOK3S_PR
112 svcpu = svcpu_get(vcpu);
113 __entry->srr1 = svcpu->shadow_srr1;
114 svcpu_put(svcpu);
115#endif
116 __entry->last_inst = vcpu->arch.last_inst;
117 ),
118
119 TP_printk("exit=%s"
120 " | pc=0x%lx"
121 " | msr=0x%lx"
122 " | dar=0x%lx"
123#ifdef CONFIG_KVM_BOOK3S_PR
124 " | srr1=0x%lx"
125#endif
126 " | last_inst=0x%lx"
127 ,
128 __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
129 __entry->pc,
130 __entry->msr,
131 __entry->dar,
132#ifdef CONFIG_KVM_BOOK3S_PR
133 __entry->srr1,
134#endif
135 __entry->last_inst
136 )
137);
138
139TRACE_EVENT(kvm_unmap_hva,
140 TP_PROTO(unsigned long hva),
141 TP_ARGS(hva),
142
143 TP_STRUCT__entry(
144 __field( unsigned long, hva )
145 ),
146
147 TP_fast_assign(
148 __entry->hva = hva;
149 ),
150
151 TP_printk("unmap hva 0x%lx\n", __entry->hva)
152);
153
34TRACE_EVENT(kvm_stlb_inval, 154TRACE_EVENT(kvm_stlb_inval,
35 TP_PROTO(unsigned int stlb_index), 155 TP_PROTO(unsigned int stlb_index),
36 TP_ARGS(stlb_index), 156 TP_ARGS(stlb_index),
@@ -98,41 +218,31 @@ TRACE_EVENT(kvm_gtlb_write,
98 __entry->word1, __entry->word2) 218 __entry->word1, __entry->word2)
99); 219);
100 220
101 221TRACE_EVENT(kvm_check_requests,
102/************************************************************************* 222 TP_PROTO(struct kvm_vcpu *vcpu),
103 * Book3S trace points * 223 TP_ARGS(vcpu),
104 *************************************************************************/
105
106#ifdef CONFIG_KVM_BOOK3S_PR
107
108TRACE_EVENT(kvm_book3s_exit,
109 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
110 TP_ARGS(exit_nr, vcpu),
111 224
112 TP_STRUCT__entry( 225 TP_STRUCT__entry(
113 __field( unsigned int, exit_nr ) 226 __field( __u32, cpu_nr )
114 __field( unsigned long, pc ) 227 __field( __u32, requests )
115 __field( unsigned long, msr )
116 __field( unsigned long, dar )
117 __field( unsigned long, srr1 )
118 ), 228 ),
119 229
120 TP_fast_assign( 230 TP_fast_assign(
121 struct kvmppc_book3s_shadow_vcpu *svcpu; 231 __entry->cpu_nr = vcpu->vcpu_id;
122 __entry->exit_nr = exit_nr; 232 __entry->requests = vcpu->requests;
123 __entry->pc = kvmppc_get_pc(vcpu);
124 __entry->dar = kvmppc_get_fault_dar(vcpu);
125 __entry->msr = vcpu->arch.shared->msr;
126 svcpu = svcpu_get(vcpu);
127 __entry->srr1 = svcpu->shadow_srr1;
128 svcpu_put(svcpu);
129 ), 233 ),
130 234
131 TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx", 235 TP_printk("vcpu=%x requests=%x",
132 __entry->exit_nr, __entry->pc, __entry->msr, __entry->dar, 236 __entry->cpu_nr, __entry->requests)
133 __entry->srr1)
134); 237);
135 238
239
240/*************************************************************************
241 * Book3S trace points *
242 *************************************************************************/
243
244#ifdef CONFIG_KVM_BOOK3S_PR
245
136TRACE_EVENT(kvm_book3s_reenter, 246TRACE_EVENT(kvm_book3s_reenter,
137 TP_PROTO(int r, struct kvm_vcpu *vcpu), 247 TP_PROTO(int r, struct kvm_vcpu *vcpu),
138 TP_ARGS(r, vcpu), 248 TP_ARGS(r, vcpu),
@@ -395,6 +505,44 @@ TRACE_EVENT(kvm_booke206_gtlb_write,
395 __entry->mas2, __entry->mas7_3) 505 __entry->mas2, __entry->mas7_3)
396); 506);
397 507
508TRACE_EVENT(kvm_booke206_ref_release,
509 TP_PROTO(__u64 pfn, __u32 flags),
510 TP_ARGS(pfn, flags),
511
512 TP_STRUCT__entry(
513 __field( __u64, pfn )
514 __field( __u32, flags )
515 ),
516
517 TP_fast_assign(
518 __entry->pfn = pfn;
519 __entry->flags = flags;
520 ),
521
522 TP_printk("pfn=%llx flags=%x",
523 __entry->pfn, __entry->flags)
524);
525
526TRACE_EVENT(kvm_booke_queue_irqprio,
527 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
528 TP_ARGS(vcpu, priority),
529
530 TP_STRUCT__entry(
531 __field( __u32, cpu_nr )
532 __field( __u32, priority )
533 __field( unsigned long, pending )
534 ),
535
536 TP_fast_assign(
537 __entry->cpu_nr = vcpu->vcpu_id;
538 __entry->priority = priority;
539 __entry->pending = vcpu->arch.pending_exceptions;
540 ),
541
542 TP_printk("vcpu=%x prio=%x pending=%lx",
543 __entry->cpu_nr, __entry->priority, __entry->pending)
544);
545
398#endif 546#endif
399 547
400#endif /* _TRACE_KVM_H */ 548#endif /* _TRACE_KVM_H */
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e7a896acd982..48a920d51489 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -90,6 +90,7 @@ config MPIC
90config PPC_EPAPR_HV_PIC 90config PPC_EPAPR_HV_PIC
91 bool 91 bool
92 default n 92 default n
93 select EPAPR_PARAVIRT
93 94
94config MPIC_WEIRD 95config MPIC_WEIRD
95 bool 96 bool
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 51ffafae561e..63c5f04ea580 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -236,7 +236,6 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
236 u32 intr_index; 236 u32 intr_index;
237 u32 have_shift = 0; 237 u32 have_shift = 0;
238 struct fsl_msi_cascade_data *cascade_data; 238 struct fsl_msi_cascade_data *cascade_data;
239 unsigned int ret;
240 239
241 cascade_data = irq_get_handler_data(irq); 240 cascade_data = irq_get_handler_data(irq);
242 msi_data = cascade_data->msi_data; 241 msi_data = cascade_data->msi_data;
@@ -268,7 +267,9 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
268 case FSL_PIC_IP_IPIC: 267 case FSL_PIC_IP_IPIC:
269 msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4); 268 msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
270 break; 269 break;
271 case FSL_PIC_IP_VMPIC: 270#ifdef CONFIG_EPAPR_PARAVIRT
271 case FSL_PIC_IP_VMPIC: {
272 unsigned int ret;
272 ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value); 273 ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
273 if (ret) { 274 if (ret) {
274 pr_err("fsl-msi: fh_vmpic_get_msir() failed for " 275 pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
@@ -277,6 +278,8 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
277 } 278 }
278 break; 279 break;
279 } 280 }
281#endif
282 }
280 283
281 while (msir_value) { 284 while (msir_value) {
282 intr_index = ffs(msir_value) - 1; 285 intr_index = ffs(msir_value) - 1;
@@ -508,10 +511,12 @@ static const struct of_device_id fsl_of_msi_ids[] = {
508 .compatible = "fsl,ipic-msi", 511 .compatible = "fsl,ipic-msi",
509 .data = &ipic_msi_feature, 512 .data = &ipic_msi_feature,
510 }, 513 },
514#ifdef CONFIG_EPAPR_PARAVIRT
511 { 515 {
512 .compatible = "fsl,vmpic-msi", 516 .compatible = "fsl,vmpic-msi",
513 .data = &vmpic_msi_feature, 517 .data = &vmpic_msi_feature,
514 }, 518 },
519#endif
515 {} 520 {}
516}; 521};
517 522
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index c449dbd1c938..97118dc3d285 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -253,6 +253,7 @@ struct platform_diu_data_ops diu_ops;
253EXPORT_SYMBOL(diu_ops); 253EXPORT_SYMBOL(diu_ops);
254#endif 254#endif
255 255
256#ifdef CONFIG_EPAPR_PARAVIRT
256/* 257/*
257 * Restart the current partition 258 * Restart the current partition
258 * 259 *
@@ -278,3 +279,4 @@ void fsl_hv_halt(void)
278 pr_info("hv exit\n"); 279 pr_info("hv exit\n");
279 fh_partition_stop(-1); 280 fh_partition_stop(-1);
280} 281}
282#endif
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index d8e05eeab232..0ecf22b6a38e 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -357,6 +357,7 @@ config TRACE_SINK
357config PPC_EPAPR_HV_BYTECHAN 357config PPC_EPAPR_HV_BYTECHAN
358 tristate "ePAPR hypervisor byte channel driver" 358 tristate "ePAPR hypervisor byte channel driver"
359 depends on PPC 359 depends on PPC
360 select EPAPR_PARAVIRT
360 help 361 help
361 This driver creates /dev entries for each ePAPR hypervisor byte 362 This driver creates /dev entries for each ePAPR hypervisor byte
362 channel, thereby allowing applications to communicate with byte 363 channel, thereby allowing applications to communicate with byte
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 2dcdbc9364d8..99ebdde590f8 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -15,6 +15,7 @@ if VIRT_DRIVERS
15config FSL_HV_MANAGER 15config FSL_HV_MANAGER
16 tristate "Freescale hypervisor management driver" 16 tristate "Freescale hypervisor management driver"
17 depends on FSL_SOC 17 depends on FSL_SOC
18 select EPAPR_PARAVIRT
18 help 19 help
19 The Freescale hypervisor management driver provides several services 20 The Freescale hypervisor management driver provides several services
20 to drivers and applications related to the Freescale hypervisor: 21 to drivers and applications related to the Freescale hypervisor:
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 0a6d6ba44c85..65ad5c624c70 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -167,6 +167,7 @@ struct kvm_pit_config {
167#define KVM_EXIT_OSI 18 167#define KVM_EXIT_OSI 18
168#define KVM_EXIT_PAPR_HCALL 19 168#define KVM_EXIT_PAPR_HCALL 19
169#define KVM_EXIT_S390_UCONTROL 20 169#define KVM_EXIT_S390_UCONTROL 20
170#define KVM_EXIT_WATCHDOG 21
170 171
171/* For KVM_EXIT_INTERNAL_ERROR */ 172/* For KVM_EXIT_INTERNAL_ERROR */
172#define KVM_INTERNAL_ERROR_EMULATION 1 173#define KVM_INTERNAL_ERROR_EMULATION 1
@@ -477,6 +478,8 @@ struct kvm_ppc_smmu_info {
477 struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; 478 struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
478}; 479};
479 480
481#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
482
480#define KVMIO 0xAE 483#define KVMIO 0xAE
481 484
482/* machine type bits, to be used as argument to KVM_CREATE_VM */ 485/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -626,6 +629,7 @@ struct kvm_ppc_smmu_info {
626#define KVM_CAP_READONLY_MEM 81 629#define KVM_CAP_READONLY_MEM 81
627#endif 630#endif
628#define KVM_CAP_IRQFD_RESAMPLE 82 631#define KVM_CAP_IRQFD_RESAMPLE 82
632#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
629 633
630#ifdef KVM_CAP_IRQ_ROUTING 634#ifdef KVM_CAP_IRQ_ROUTING
631 635
@@ -848,6 +852,9 @@ struct kvm_s390_ucas_mapping {
848#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info) 852#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
849/* Available with KVM_CAP_PPC_ALLOC_HTAB */ 853/* Available with KVM_CAP_PPC_ALLOC_HTAB */
850#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32) 854#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
855#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
856/* Available with KVM_CAP_RMA */
857#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
851 858
852/* 859/*
853 * ioctls for vcpu fds 860 * ioctls for vcpu fds
@@ -911,9 +918,6 @@ struct kvm_s390_ucas_mapping {
911/* Available with KVM_CAP_XCRS */ 918/* Available with KVM_CAP_XCRS */
912#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs) 919#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
913#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs) 920#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
914#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
915/* Available with KVM_CAP_RMA */
916#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
917/* Available with KVM_CAP_SW_TLB */ 921/* Available with KVM_CAP_SW_TLB */
918#define KVM_DIRTY_TLB _IOW(KVMIO, 0xaa, struct kvm_dirty_tlb) 922#define KVM_DIRTY_TLB _IOW(KVMIO, 0xaa, struct kvm_dirty_tlb)
919/* Available with KVM_CAP_ONE_REG */ 923/* Available with KVM_CAP_ONE_REG */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c35b1c08c004..6afc5be2615e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -118,6 +118,7 @@ static inline bool is_error_page(struct page *page)
118#define KVM_REQ_IMMEDIATE_EXIT 15 118#define KVM_REQ_IMMEDIATE_EXIT 15
119#define KVM_REQ_PMU 16 119#define KVM_REQ_PMU 16
120#define KVM_REQ_PMI 17 120#define KVM_REQ_PMI 17
121#define KVM_REQ_WATCHDOG 18
121 122
122#define KVM_USERSPACE_IRQ_SOURCE_ID 0 123#define KVM_USERSPACE_IRQ_SOURCE_ID 0
123#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 124#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1