-rw-r--r--  Documentation/virtual/kvm/api.txt         |  36
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  |   7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h       |   4
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h        |   3
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c       | 123
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c              |  40
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c      |   5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c       |  15
-rw-r--r--  arch/powerpc/kvm/powerpc.c                |  18
-rw-r--r--  include/linux/kvm.h                       |   3
10 files changed, 200 insertions(+), 54 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 930126698a0f..310fe508d9cd 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1930,6 +1930,42 @@ The "pte_enc" field provides a value that can OR'ed into the hash
 PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
 into the hash PTE second double word).
 
+
+4.75 KVM_PPC_ALLOCATE_HTAB
+
+Capability: KVM_CAP_PPC_ALLOC_HTAB
+Architectures: powerpc
+Type: vm ioctl
+Parameters: Pointer to u32 containing hash table order (in/out)
+Returns: 0 on success, -1 on error
+
+This requests the host kernel to allocate an MMU hash table for a
+guest using the PAPR paravirtualization interface. This only does
+anything if the kernel is configured to use the Book 3S HV style of
+virtualization. Otherwise the capability doesn't exist and the ioctl
+returns an ENOTTY error. The rest of this description assumes Book 3S
+HV.
+
+There must be no vcpus running when this ioctl is called; if there
+are, it will do nothing and return an EBUSY error.
+
+The parameter is a pointer to a 32-bit unsigned integer variable
+containing the order (log base 2) of the desired size of the hash
+table, which must be between 18 and 46. On successful return from the
+ioctl, it will have been updated with the order of the hash table that
+was allocated.
+
+If no hash table has been allocated when any vcpu is asked to run
+(with the KVM_RUN ioctl), the host kernel will allocate a
+default-sized hash table (16 MB).
+
+If this ioctl is called when a hash table has already been allocated,
+the kernel will clear out the existing hash table (zero all HPTEs) and
+return the hash table order in the parameter. (If the guest is using
+the virtualized real-mode area (VRMA) facility, the kernel will
+re-create the VRMA HPTEs on the next KVM_RUN of any vcpu.)
+
+
 5. The kvm_run structure
 ------------------------
 
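
A minimal user-space sketch of the call sequence described above (illustrative only, not part of this patch; it assumes a Book 3S HV host and a <linux/kvm.h> that already carries this change, and it omits most error handling). It asks for a 2^28-byte (256 MB) hash table and prints the order the kernel actually granted, which can differ from the request if memory is tight:

    /* build: gcc -o htab-order htab-order.c  (illustrative sketch only) */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            uint32_t order = 28;    /* requested HPT order: log2 of the size in bytes */
            int kvm_fd, vm_fd;

            kvm_fd = open("/dev/kvm", O_RDWR);
            if (kvm_fd < 0)
                    return 1;
            /* The capability is reported by the system ioctl (see the powerpc.c hunk below). */
            if (!ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_ALLOC_HTAB))
                    return 1;
            vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
            if (vm_fd < 0)
                    return 1;
            /* Must be issued before any vcpu runs; the argument is in/out. */
            if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order) < 0) {
                    perror("KVM_PPC_ALLOCATE_HTAB");
                    return 1;
            }
            printf("HPT allocated with order %u (%lu bytes)\n", order, 1ul << order);
            return 0;
    }
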
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index b0c08b142770..0dd1d86d3e31 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -36,11 +36,8 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 #define SPAPR_TCE_SHIFT		12
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER	24
-#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
-#define HPT_NPTE	(HPT_NPTEG << 3)		/* 8 PTEs per PTEG */
-#define HPT_HASH_MASK	(HPT_NPTEG - 1)
+#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
+extern int kvm_hpt_order;		/* order of preallocated HPTs */
 #endif
 
 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d848cdc49715..dd783beb88b3 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -237,6 +237,10 @@ struct kvm_arch {
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
 	int using_mmu_notifiers;
+	u32 hpt_order;
+	atomic_t vcpus_running;
+	unsigned long hpt_npte;
+	unsigned long hpt_mask;
 	spinlock_t slot_phys_lock;
 	unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
 	int slot_npages[KVM_MEM_SLOTS_NUM];
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index f68c22fa2fce..0124937a23b9 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -119,7 +119,8 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
 extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
 
-extern long kvmppc_alloc_hpt(struct kvm *kvm);
+extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
+extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
 extern void kvmppc_free_hpt(struct kvm *kvm);
 extern long kvmppc_prepare_vrma(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80a577517584..d03eb6f7b058 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,56 +37,121 @@
 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
 #define MAX_LPID_970	63
 
-long kvmppc_alloc_hpt(struct kvm *kvm)
+/* Power architecture requires HPT is at least 256kB */
+#define PPC_MIN_HPT_ORDER	18
+
+long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
 	unsigned long hpt;
-	long lpid;
 	struct revmap_entry *rev;
 	struct kvmppc_linear_info *li;
+	long order = kvm_hpt_order;
 
-	/* Allocate guest's hashed page table */
-	li = kvm_alloc_hpt();
-	if (li) {
-		/* using preallocated memory */
-		hpt = (ulong)li->base_virt;
-		kvm->arch.hpt_li = li;
-	} else {
-		/* using dynamic memory */
+	if (htab_orderp) {
+		order = *htab_orderp;
+		if (order < PPC_MIN_HPT_ORDER)
+			order = PPC_MIN_HPT_ORDER;
+	}
+
+	/*
+	 * If the user wants a different size from default,
+	 * try first to allocate it from the kernel page allocator.
+	 */
+	hpt = 0;
+	if (order != kvm_hpt_order) {
 		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
-				__GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
+				       __GFP_NOWARN, order - PAGE_SHIFT);
+		if (!hpt)
+			--order;
 	}
 
+	/* Next try to allocate from the preallocated pool */
 	if (!hpt) {
-		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
-		return -ENOMEM;
+		li = kvm_alloc_hpt();
+		if (li) {
+			hpt = (ulong)li->base_virt;
+			kvm->arch.hpt_li = li;
+			order = kvm_hpt_order;
+		}
 	}
+
+	/* Lastly try successively smaller sizes from the page allocator */
+	while (!hpt && order > PPC_MIN_HPT_ORDER) {
+		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
+				       __GFP_NOWARN, order - PAGE_SHIFT);
+		if (!hpt)
+			--order;
+	}
+
+	if (!hpt)
+		return -ENOMEM;
+
 	kvm->arch.hpt_virt = hpt;
+	kvm->arch.hpt_order = order;
+	/* HPTEs are 2**4 bytes long */
+	kvm->arch.hpt_npte = 1ul << (order - 4);
+	/* 128 (2**7) bytes in each HPTEG */
+	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
 
 	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
+	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
 	if (!rev) {
 		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
 		goto out_freehpt;
 	}
 	kvm->arch.revmap = rev;
+	kvm->arch.sdr1 = __pa(hpt) | (order - 18);
 
-	lpid = kvmppc_alloc_lpid();
-	if (lpid < 0)
-		goto out_freeboth;
+	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
+		hpt, order, kvm->arch.lpid);
 
-	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
-	kvm->arch.lpid = lpid;
-
-	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
+	if (htab_orderp)
+		*htab_orderp = order;
 	return 0;
 
- out_freeboth:
-	vfree(rev);
  out_freehpt:
-	free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+	if (kvm->arch.hpt_li)
+		kvm_release_hpt(kvm->arch.hpt_li);
+	else
+		free_pages(hpt, order - PAGE_SHIFT);
 	return -ENOMEM;
 }
 
+long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
+{
+	long err = -EBUSY;
+	long order;
+
+	mutex_lock(&kvm->lock);
+	if (kvm->arch.rma_setup_done) {
+		kvm->arch.rma_setup_done = 0;
+		/* order rma_setup_done vs. vcpus_running */
+		smp_mb();
+		if (atomic_read(&kvm->arch.vcpus_running)) {
+			kvm->arch.rma_setup_done = 1;
+			goto out;
+		}
+	}
+	if (kvm->arch.hpt_virt) {
+		order = kvm->arch.hpt_order;
+		/* Set the entire HPT to 0, i.e. invalid HPTEs */
+		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+		/*
+		 * Set the whole last_vcpu array to an invalid vcpu number.
+		 * This ensures that each vcpu will flush its TLB on next entry.
+		 */
+		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
+		*htab_orderp = order;
+		err = 0;
+	} else {
+		err = kvmppc_alloc_hpt(kvm, htab_orderp);
+		order = *htab_orderp;
+	}
+ out:
+	mutex_unlock(&kvm->lock);
+	return err;
+}
+
 void kvmppc_free_hpt(struct kvm *kvm)
 {
 	kvmppc_free_lpid(kvm->arch.lpid);
@@ -94,7 +159,8 @@ void kvmppc_free_hpt(struct kvm *kvm)
 	if (kvm->arch.hpt_li)
 		kvm_release_hpt(kvm->arch.hpt_li);
 	else
-		free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+		free_pages(kvm->arch.hpt_virt,
+			   kvm->arch.hpt_order - PAGE_SHIFT);
 }
 
 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
@@ -119,6 +185,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	unsigned long psize;
 	unsigned long hp0, hp1;
 	long ret;
+	struct kvm *kvm = vcpu->kvm;
 
 	psize = 1ul << porder;
 	npages = memslot->npages >> (porder - PAGE_SHIFT);
@@ -127,8 +194,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	if (npages > 1ul << (40 - porder))
 		npages = 1ul << (40 - porder);
 	/* Can't use more than 1 HPTE per HPTEG */
-	if (npages > HPT_NPTEG)
-		npages = HPT_NPTEG;
+	if (npages > kvm->arch.hpt_mask + 1)
+		npages = kvm->arch.hpt_mask + 1;
 
 	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
 		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -138,7 +205,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	for (i = 0; i < npages; ++i) {
 		addr = i << porder;
 		/* can't use hpt_hash since va > 64 bits */
-		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
+		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
 		/*
 		 * We assume that the hash table is empty and no
 		 * vcpus are using it at this stage.  Since we create
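
The sizing comments in kvmppc_alloc_hpt() above ("HPTEs are 2**4 bytes long", "128 (2**7) bytes in each HPTEG") reduce to simple shifts. A tiny standalone sketch (illustrative only, not kernel code) of what those expressions yield for the default KVM_DEFAULT_HPT_ORDER of 24, i.e. a 16 MB table:

    #include <stdio.h>

    int main(void)
    {
            unsigned long order = 24;                       /* KVM_DEFAULT_HPT_ORDER: 16 MB HPT */
            unsigned long bytes = 1ul << order;             /* 16777216 bytes */
            unsigned long npte  = 1ul << (order - 4);       /* 16-byte HPTEs -> 1048576 entries */
            unsigned long mask  = (1ul << (order - 7)) - 1; /* 128-byte HPTEGs -> hash mask 0x1ffff */

            printf("HPT size : %lu bytes\n", bytes);
            printf("hpt_npte : %lu\n", npte);
            printf("hpt_mask : 0x%lx (%lu HPTEGs)\n", mask, mask + 1);
            return 0;
    }

This is also why kvmppc_map_vrma() above caps npages at kvm->arch.hpt_mask + 1: with one bolted VRMA HPTE per HPTEG, the number of HPTEGs is the hard limit.
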
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c6af1d623839..d084e412b3c5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -56,7 +56,7 @@
 /* #define EXIT_DEBUG_INT */
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
-static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
+static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -1068,11 +1068,15 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		return -EINTR;
 	}
 
-	/* On the first time here, set up VRMA or RMA */
+	atomic_inc(&vcpu->kvm->arch.vcpus_running);
+	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
+	smp_mb();
+
+	/* On the first time here, set up HTAB and VRMA or RMA */
 	if (!vcpu->kvm->arch.rma_setup_done) {
-		r = kvmppc_hv_setup_rma(vcpu);
+		r = kvmppc_hv_setup_htab_rma(vcpu);
 		if (r)
-			return r;
+			goto out;
 	}
 
 	flush_fp_to_thread(current);
@@ -1090,6 +1094,9 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			kvmppc_core_prepare_to_enter(vcpu);
 		}
 	} while (r == RESUME_GUEST);
+
+ out:
+	atomic_dec(&vcpu->kvm->arch.vcpus_running);
 	return r;
 }
 
@@ -1305,7 +1312,7 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 {
 }
 
-static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
+static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
 	struct kvm *kvm = vcpu->kvm;
@@ -1324,6 +1331,15 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
 	if (kvm->arch.rma_setup_done)
 		goto out;	/* another vcpu beat us to it */
 
+	/* Allocate hashed page table (if not done already) and reset it */
+	if (!kvm->arch.hpt_virt) {
+		err = kvmppc_alloc_hpt(kvm, NULL);
+		if (err) {
+			pr_err("KVM: Couldn't alloc HPT\n");
+			goto out;
+		}
+	}
+
 	/* Look up the memslot for guest physical address 0 */
 	memslot = gfn_to_memslot(kvm, 0);
 
@@ -1435,13 +1451,14 @@
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
-	long r;
-	unsigned long lpcr;
+	unsigned long lpcr, lpid;
 
-	/* Allocate hashed page table */
-	r = kvmppc_alloc_hpt(kvm);
-	if (r)
-		return r;
+	/* Allocate the guest's logical partition ID */
+
+	lpid = kvmppc_alloc_lpid();
+	if (lpid < 0)
+		return -ENOMEM;
+	kvm->arch.lpid = lpid;
 
 	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
 
@@ -1451,7 +1468,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 
 	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
 		/* PPC970; HID4 is effectively the LPCR */
-		unsigned long lpid = kvm->arch.lpid;
 		kvm->arch.host_lpid = 0;
 		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
 		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
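
The new atomic_inc()/smp_mb() in kvmppc_vcpu_run() pairs with the smp_mb() in kvmppc_alloc_reset_hpt() in the classic Dekker style: each side publishes its own flag before reading the other's, so either the resetter sees vcpus_running and backs off with EBUSY, or the vcpu sees rma_setup_done cleared and redoes the setup under kvm->lock. A minimal user-space sketch of that store/fence/load pairing, using C11 atomics purely for illustration (the names mirror the kernel fields, but this is not kernel code and it simplifies the locking away):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int vcpus_running;
    static atomic_int rma_setup_done = 1;

    /* kvmppc_vcpu_run() side: advertise this vcpu, then look at the setup flag. */
    static bool vcpu_run_side(void)
    {
            atomic_fetch_add(&vcpus_running, 1);
            atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() in kvmppc_vcpu_run() */
            /* false means: redo the HTAB/RMA setup (serialized by kvm->lock in the kernel) */
            return atomic_load(&rma_setup_done) != 0;
    }

    /* kvmppc_alloc_reset_hpt() side: clear the flag, then look for running vcpus. */
    static bool reset_side(void)
    {
            atomic_store(&rma_setup_done, 0);
            atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() in kvmppc_alloc_reset_hpt() */
            if (atomic_load(&vcpus_running) != 0) {
                    atomic_store(&rma_setup_done, 1);    /* a vcpu may be running: back off */
                    return false;                        /* caller returns -EBUSY */
            }
            return true;                                 /* safe to zero the HPT */
    }

    int main(void)
    {
            /* Single-threaded demo of the two paths; the interesting case is concurrent use. */
            printf("reset while idle:    %s\n", reset_side() ? "ok" : "-EBUSY");
            vcpu_run_side();
            printf("reset while running: %s\n", reset_side() ? "ok" : "-EBUSY");
            return 0;
    }
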
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index e1b60f56f2a1..fb4eac290fef 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -25,6 +25,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type);
 static struct kvmppc_linear_info *kvm_alloc_linear(int type);
 static void kvm_release_linear(struct kvmppc_linear_info *ri);
 
+int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER;
+EXPORT_SYMBOL_GPL(kvm_hpt_order);
+
 /*************** RMA *************/
 
 /*
@@ -209,7 +212,7 @@ static void kvm_release_linear(struct kvmppc_linear_info *ri)
 void __init kvm_linear_init(void)
 {
 	/* HPT */
-	kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT);
+	kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT);
 
 	/* RMA */
 	/* Only do this on PPC970 in HV mode */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index cec4daddbf31..5c70d19494f9 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -237,7 +237,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	/* Find and lock the HPTEG slot to use */
  do_insert:
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 	if (likely((flags & H_EXACT) == 0)) {
 		pte_index &= ~7UL;
@@ -352,7 +352,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 	unsigned long v, r, rb;
 	struct revmap_entry *rev;
 
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -419,7 +419,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			i = 4;
 			break;
 		}
-		if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
+		if (req != 1 || flags == 3 ||
+		    pte_index >= kvm->arch.hpt_npte) {
 			/* parameter error */
 			args[j] = ((0xa0 | flags) << 56) + pte_index;
 			ret = H_PARAMETER;
@@ -521,7 +522,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	struct revmap_entry *rev;
 	unsigned long v, r, rb, mask, bits;
 
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
@@ -583,7 +584,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 	int i, n = 1;
 	struct revmap_entry *rev = NULL;
 
-	if (pte_index >= HPT_NPTE)
+	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 	if (flags & H_READ_4) {
 		pte_index &= ~3;
@@ -678,7 +679,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		somask = (1UL << 28) - 1;
 		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
 	}
-	hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
+	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
 	avpn = slb_v & ~(somask >> 16);	/* also includes B */
 	avpn |= (eaddr & somask) >> 16;
 
@@ -723,7 +724,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		if (val & HPTE_V_SECONDARY)
 			break;
 		val |= HPTE_V_SECONDARY;
-		hash = hash ^ HPT_HASH_MASK;
+		hash = hash ^ kvm->arch.hpt_mask;
 	}
 	return -1;
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1493c8de947b..87f4dc886076 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -246,6 +246,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
+	case KVM_CAP_PPC_ALLOC_HTAB:
 		r = 1;
 		break;
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -802,6 +803,23 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		break;
 	}
+
+	case KVM_PPC_ALLOCATE_HTAB: {
+		struct kvm *kvm = filp->private_data;
+		u32 htab_order;
+
+		r = -EFAULT;
+		if (get_user(htab_order, (u32 __user *)argp))
+			break;
+		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
+		if (r)
+			break;
+		r = -EFAULT;
+		if (put_user(htab_order, (u32 __user *)argp))
+			break;
+		r = 0;
+		break;
+	}
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 
 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 09f2b3aa2da7..2ce09aa7d3b3 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -617,6 +617,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_SIGNAL_MSI 77
 #define KVM_CAP_PPC_GET_SMMU_INFO 78
 #define KVM_CAP_S390_COW 79
+#define KVM_CAP_PPC_ALLOC_HTAB 80
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -828,6 +829,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
 /* Available with KVM_CAP_PPC_GET_SMMU_INFO */
 #define KVM_PPC_GET_SMMU_INFO	  _IOR(KVMIO,  0xa6, struct kvm_ppc_smmu_info)
+/* Available with KVM_CAP_PPC_ALLOC_HTAB */
+#define KVM_PPC_ALLOCATE_HTAB	  _IOWR(KVMIO, 0xa7, __u32)
 
 /*
  * ioctls for vcpu fds