-rw-r--r--  Documentation/virtual/kvm/api.txt        |  1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h    |  2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h      |  1
-rw-r--r--  arch/powerpc/include/asm/reg.h           |  3
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h      |  1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c        |  1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c      |  5
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             | 73
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  |  5
9 files changed, 75 insertions, 17 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 26fc37355dcb..387f4c7dad9f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1835,6 +1835,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_PID       | 64
   PPC   | KVM_REG_PPC_ACOP      | 64
   PPC   | KVM_REG_PPC_VRSAVE    | 32
+  PPC   | KVM_REG_PPC_LPCR      | 64
   PPC   | KVM_REG_PPC_TM_GPR0   | 64
           ...
   PPC   | KVM_REG_PPC_TM_GPR31  | 64
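The new KVM_REG_PPC_LPCR entry is accessed through the usual ONE_REG ioctls. As a rough illustration (editorial sketch, not part of the patch), a userspace VMM could set the guest-controllable LPCR bits as below; vcpu_fd is assumed to be an already-created vCPU file descriptor, and the transfer width follows the size encoded in the register ID.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: read the per-vcore LPCR, OR in the requested bits and
 * write it back.  Only DPFD, ILE and TC are honoured by the kernel;
 * everything else is masked off by kvmppc_set_lpcr().
 */
static int kvm_lpcr_set_bits(int vcpu_fd, uint64_t bits)
{
        uint64_t lpcr = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_LPCR,
                .addr = (uintptr_t)&lpcr,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                return -1;

        lpcr |= bits;

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}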
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f1a874..14a47416bdd4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -172,6 +172,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e4d67a606e43..6eabffcb1c3c 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -290,6 +290,7 @@ struct kvmppc_vcore {
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
 	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fd4db15e6f2a..4bec4df3fb98 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -284,6 +284,7 @@
 #define   LPCR_ISL	(1ul << (63-2))
 #define   LPCR_VC_SH	(63-2)
 #define   LPCR_DPFD_SH	(63-11)
+#define   LPCR_DPFD	(7ul << LPCR_DPFD_SH)
 #define   LPCR_VRMASD	(0x1ful << (63-16))
 #define   LPCR_VRMA_L	(1ul << (63-12))
 #define   LPCR_VRMA_LP0	(1ul << (63-15))
@@ -300,6 +301,7 @@
 #define     LPCR_PECE2	0x00001000	/* machine check etc can cause exit */
 #define   LPCR_MER	0x00000800	/* Mediated External Exception */
 #define   LPCR_MER_SH	11
+#define   LPCR_TC	0x00000200	/* Translation control */
 #define   LPCR_LPES	0x0000000c
 #define   LPCR_LPES0	0x00000008	/* LPAR Env selector 0 */
 #define   LPCR_LPES1	0x00000004	/* LPAR Env selector 1 */
@@ -421,6 +423,7 @@
 #define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
 #define	 HID4_LPID5_SH	 (63 - 6)	/* partition ID bottom 4 bits */
 #define	 HID4_RMOR_SH	 (63 - 22)	/* real mode offset (16 bits) */
+#define  HID4_RMOR	 (0xFFFFul << HID4_RMOR_SH)
 #define  HID4_LPES1	 (1 << (63-57))	/* LPAR env. sel. bit 1 */
 #define  HID4_RMLS0_SH	 (63 - 58)	/* Real mode limit top bit */
 #define	 HID4_LPID1_SH	 0		/* partition ID top 2 bits */
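For reference (editorial illustration, not part of the patch), the new mask macros pair with the existing shift macros as follows; the static asserts just spell out where the fields land, using IBM bit numbering (bit 0 = most significant bit of the 64-bit register).

/* Self-contained illustration of the two new field masks. */
#define LPCR_DPFD_SH	(63 - 11)			/* = 52 */
#define LPCR_DPFD	(7ul << LPCR_DPFD_SH)		/* LPCR bits 9..11  */
#define HID4_RMOR_SH	(63 - 22)			/* = 41 */
#define HID4_RMOR	(0xFFFFul << HID4_RMOR_SH)	/* HID4 bits 7..22  */

_Static_assert(LPCR_DPFD == 0x0070000000000000ul, "3-bit default prefetch depth field");
_Static_assert(HID4_RMOR == 0x01fffe0000000000ul, "16-bit real mode offset field");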
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index b98bf3f50527..e42127d1ae8e 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -533,6 +533,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
 
 #define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
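As a side note (editorial, not from the patch), the ONE_REG ID packs the architecture, transfer size and register index into one 64-bit constant. Assuming the usual uapi values, the new define works out to

	KVM_REG_PPC_LPCR = KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5
	                 = 0x1000000000000000 | 0x0020000000000000 | 0xb5
	                 = 0x10200000000000b5

where the top byte selects the PPC register space, bits 55:52 encode the transfer size and the low bits are the register index.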
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 34d63d871917..fd7513f8014b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -524,6 +524,7 @@ int main(void)
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
 	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
+	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
 	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
 			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
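The asm-offsets.c hunk exists only so the assembly code can reach the new field: kbuild compiles this file and turns every DEFINE() into a plain numeric constant in include/generated/asm-offsets.h. A rough sketch of the outcome (the offset value below is made up):

/*
 * include/generated/asm-offsets.h (illustrative -- the real value depends
 * on the layout of struct kvmppc_vcore):
 */
#define VCORE_LPCR 104		/* offsetof(struct kvmppc_vcore, lpcr) */

The assembly can then address the field directly, e.g. "ld r8,VCORE_LPCR(r5)" with r5 holding the vcore pointer.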
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 043eec8461e7..ccb89a048bf8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1512,9 +1512,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 
 			kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 				(VRMA_VSID << SLB_VSID_SHIFT_1T);
-			lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-			lpcr |= senc << (LPCR_VRMASD_SH - 4);
-			kvm->arch.lpcr = lpcr;
+			lpcr = senc << (LPCR_VRMASD_SH - 4);
+			kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
 			rma_setup = 1;
 		}
 		++i;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index a010aa4cd026..36eb95cc48ae 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -195,7 +195,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 		pr_err("  ESID = %.16llx VSID = %.16llx\n",
 		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
 	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
-	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
 	       vcpu->arch.last_inst);
 }
 
@@ -723,6 +723,21 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	u64 mask;
+
+	spin_lock(&vc->lock);
+	/*
+	 * Userspace can only modify DPFD (default prefetch depth),
+	 * ILE (interrupt little-endian) and TC (translation control).
+	 */
+	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+	spin_unlock(&vc->lock);
+}
+
 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 {
 	int r = 0;
@@ -805,6 +820,9 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	case KVM_REG_PPC_TB_OFFSET:
 		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
 		break;
+	case KVM_REG_PPC_LPCR:
+		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -909,6 +927,9 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
 		break;
+	case KVM_REG_PPC_LPCR:
+		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -969,6 +990,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 		spin_lock_init(&vcore->lock);
 		init_waitqueue_head(&vcore->wq);
 		vcore->preempt_tb = TB_NIL;
+		vcore->lpcr = kvm->arch.lpcr;
 	}
 	kvm->arch.vcores[core] = vcore;
 	kvm->arch.online_vcores++;
@@ -1758,6 +1780,32 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 	}
 }
 
+/*
+ * Update LPCR values in kvm->arch and in vcores.
+ * Caller must hold kvm->lock.
+ */
+void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
+{
+	long int i;
+	u32 cores_done = 0;
+
+	if ((kvm->arch.lpcr & mask) == lpcr)
+		return;
+
+	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
+
+	for (i = 0; i < KVM_MAX_VCORES; ++i) {
+		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
+		if (!vc)
+			continue;
+		spin_lock(&vc->lock);
+		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
+		spin_unlock(&vc->lock);
+		if (++cores_done >= kvm->arch.online_vcores)
+			break;
+	}
+}
+
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
@@ -1766,7 +1814,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	unsigned long hva;
 	struct kvm_memory_slot *memslot;
 	struct vm_area_struct *vma;
-	unsigned long lpcr, senc;
+	unsigned long lpcr = 0, senc;
+	unsigned long lpcr_mask = 0;
 	unsigned long psize, porder;
 	unsigned long rma_size;
 	unsigned long rmls;
@@ -1831,9 +1880,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		senc = slb_pgsize_encoding(psize);
 		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 			(VRMA_VSID << SLB_VSID_SHIFT_1T);
-		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-		lpcr |= senc << (LPCR_VRMASD_SH - 4);
-		kvm->arch.lpcr = lpcr;
+		lpcr_mask = LPCR_VRMASD;
+		/* the -4 is to account for senc values starting at 0x10 */
+		lpcr = senc << (LPCR_VRMASD_SH - 4);
 
 		/* Create HPTEs in the hash page table for the VRMA */
 		kvmppc_map_vrma(vcpu, memslot, porder);
@@ -1854,23 +1903,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		kvm->arch.rma = ri;
 
 		/* Update LPCR and RMOR */
-		lpcr = kvm->arch.lpcr;
 		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
 			/* PPC970; insert RMLS value (split field) in HID4 */
-			lpcr &= ~((1ul << HID4_RMLS0_SH) |
-				  (3ul << HID4_RMLS2_SH));
-			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
-				((rmls & 3) << HID4_RMLS2_SH);
+			lpcr_mask = (1ul << HID4_RMLS0_SH) |
+				(3ul << HID4_RMLS2_SH) | HID4_RMOR;
+			lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
+				((rmls & 3) << HID4_RMLS2_SH);
 			/* RMOR is also in HID4 */
 			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
 				<< HID4_RMOR_SH;
 		} else {
 			/* POWER7 */
-			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
-			lpcr |= rmls << LPCR_RMLS_SH;
+			lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
+			lpcr = rmls << LPCR_RMLS_SH;
 			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
 		}
-		kvm->arch.lpcr = lpcr;
 		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
 			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
 
@@ -1889,6 +1936,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
 	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
 	smp_wmb();
 	kvm->arch.rma_setup_done = 1;
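To make the locking contract of the new helper concrete, here is a hypothetical in-kernel caller (illustration only, not part of the patch): kvmppc_update_lpcr() expects kvm->lock to be held and takes each vcore lock itself, so a VM-wide LPCR change would look roughly like this.

#include <linux/kvm_host.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>

/* Hypothetical helper: turn on LPCR_TC for every virtual core of a VM. */
static void example_set_lpcr_tc(struct kvm *kvm)
{
        mutex_lock(&kvm->lock);
        /* new value = LPCR_TC, mask = LPCR_TC: only the TC bit changes */
        kvmppc_update_lpcr(kvm, LPCR_TC, LPCR_TC);
        mutex_unlock(&kvm->lock);
}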
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 0effcd144241..295fd58af39a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -509,7 +509,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	beq	20b
 
 	/* Set LPCR and RMOR. */
-10:	ld	r8,KVM_LPCR(r9)
+10:	ld	r8,VCORE_LPCR(r5)
 	mtspr	SPRN_LPCR,r8
 	ld	r8,KVM_RMOR(r9)
 	mtspr	SPRN_RMOR,r8
@@ -571,7 +571,8 @@ toc_tlbie_lock:
 	bne	24b
 	isync
 
-	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
 	li	r0,0x18f
 	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
 	or	r0,r7,r0