 arch/powerpc/kvm/book3s_64_mmu_hv.c |  3 +++
 arch/powerpc/kvm/book3s_hv_cma.c    | 35 ++++++++++++++++++++++++-----------
 arch/powerpc/kvm/book3s_hv_cma.h    |  5 +++++
 3 files changed, 32 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 354f4bb21f5c..7eb5ddab1203 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,8 @@
 #include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 
+#include "book3s_hv_cma.h"
+
 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
 #define MAX_LPID_970	63
 
@@ -71,6 +73,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 
 	/* Next try to allocate from the preallocated pool */
 	if (!hpt) {
+		VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
 		page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
 		if (page) {
 			hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
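
The new VM_BUG_ON above relies on the hash page table order never being smaller than the CMA chunk order, so the page count passed to kvm_alloc_hpt() always covers a whole number of chunks. A minimal standalone sketch of that invariant, not part of the patch, assuming PAGE_SHIFT 12 (4K pages) purely for illustration:

/*
 * Standalone sketch (not kernel code): an HPT order below
 * KVM_CMA_CHUNK_ORDER would request less than one bitmap chunk.
 * PAGE_SHIFT 12 is an assumed value, not taken from the patch.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT          12
#define KVM_CMA_CHUNK_ORDER 18

int main(void)
{
	unsigned int order = 24;			/* e.g. a 16 MB hash page table */
	unsigned long nr_pages, nr_chunks;

	assert(order >= KVM_CMA_CHUNK_ORDER);		/* mirrors the VM_BUG_ON */
	nr_pages = 1UL << (order - PAGE_SHIFT);		/* value passed to kvm_alloc_hpt() */
	nr_chunks = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	printf("order %u -> %lu pages = %lu chunks of 256K\n",
	       order, nr_pages, nr_chunks);
	return 0;
}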
diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
index e04b269b9c5b..d9d3d8553d51 100644
--- a/arch/powerpc/kvm/book3s_hv_cma.c
+++ b/arch/powerpc/kvm/book3s_hv_cma.c
@@ -24,6 +24,8 @@
 #include <linux/sizes.h>
 #include <linux/slab.h>
 
+#include "book3s_hv_cma.h"
+
 struct kvm_cma {
 	unsigned long base_pfn;
 	unsigned long count;
@@ -96,6 +98,7 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 	int ret;
 	struct page *page = NULL;
 	struct kvm_cma *cma = &kvm_cma_area;
+	unsigned long chunk_count, nr_chunk;
 	unsigned long mask, pfn, pageno, start = 0;
 
 
@@ -107,21 +110,27 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 
 	if (!nr_pages)
 		return NULL;
-
+	/*
+	 * align mask with chunk size. The bit tracks pages in chunk size
+	 */
 	VM_BUG_ON(!is_power_of_2(align_pages));
-	mask = align_pages - 1;
+	mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
+	BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
+
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
 	mutex_lock(&kvm_cma_mutex);
 	for (;;) {
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, nr_pages, mask);
-		if (pageno >= cma->count)
+		pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
+						    start, nr_chunk, mask);
+		if (pageno >= chunk_count)
 			break;
 
-		pfn = cma->base_pfn + pageno;
+		pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
 		ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, nr_pages);
+			bitmap_set(cma->bitmap, pageno, nr_chunk);
 			page = pfn_to_page(pfn);
 			memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
 			break;
@@ -150,9 +159,9 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
 {
 	unsigned long pfn;
+	unsigned long nr_chunk;
 	struct kvm_cma *cma = &kvm_cma_area;
 
-
 	if (!cma || !pages)
 		return false;
 
@@ -164,9 +173,12 @@ bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
 		return false;
 
 	VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
 	mutex_lock(&kvm_cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, nr_pages);
+	bitmap_clear(cma->bitmap,
+		     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
+		     nr_chunk);
 	free_contig_range(pfn, nr_pages);
 	mutex_unlock(&kvm_cma_mutex);
 
@@ -204,13 +216,14 @@ static int __init kvm_cma_activate_area(unsigned long base_pfn,
 static int __init kvm_cma_init_reserved_areas(void)
 {
 	int bitmap_size, ret;
+	unsigned long chunk_count;
 	struct kvm_cma *cma = &kvm_cma_area;
 
 	pr_debug("%s()\n", __func__);
 	if (!cma->count)
 		return 0;
-
-	bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
 	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!cma->bitmap)
 		return -ENOMEM;
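
Because the bitmap tracks chunks, the init path above also sizes it from chunk_count instead of cma->count, shrinking it by a factor of 2^(KVM_CMA_CHUNK_ORDER - PAGE_SHIFT). A standalone sketch of the saving, not part of the patch, with a local BITS_TO_LONGS and an assumed 4K page size and region size:

/*
 * Standalone sketch (not kernel code): bitmap size at page vs. chunk
 * granularity. BITS_TO_LONGS is reimplemented locally; PAGE_SHIFT 12 and
 * the region size are illustrative assumptions.
 */
#include <stdio.h>

#define PAGE_SHIFT          12
#define KVM_CMA_CHUNK_ORDER 18
#define BITS_PER_LONG       (8 * sizeof(long))
#define BITS_TO_LONGS(n)    (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long count = 1UL << 18;	/* pages in the reserved area (1 GB with 4K pages) */
	unsigned long chunk_count = count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	printf("per-page bitmap:  %lu bytes\n",
	       (unsigned long)(BITS_TO_LONGS(count) * sizeof(long)));
	printf("per-chunk bitmap: %lu bytes\n",
	       (unsigned long)(BITS_TO_LONGS(chunk_count) * sizeof(long)));
	return 0;
}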
diff --git a/arch/powerpc/kvm/book3s_hv_cma.h b/arch/powerpc/kvm/book3s_hv_cma.h
index 788bc3b73104..655144f75fa5 100644
--- a/arch/powerpc/kvm/book3s_hv_cma.h
+++ b/arch/powerpc/kvm/book3s_hv_cma.h
@@ -14,6 +14,11 @@
 
 #ifndef __POWERPC_KVM_CMA_ALLOC_H__
 #define __POWERPC_KVM_CMA_ALLOC_H__
+/*
+ * Both RMA and Hash page allocation will be multiple of 256K.
+ */
+#define KVM_CMA_CHUNK_ORDER	18
+
 extern struct page *kvm_alloc_cma(unsigned long nr_pages,
 				  unsigned long align_pages);
 extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
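
For reference, KVM_CMA_CHUNK_ORDER 18 makes each chunk 1 << 18 = 256K, matching the header comment that RMA and hash page table allocations are multiples of 256K. A standalone sketch, not part of the patch, of how many pages that is for two common page sizes (the 4K and 64K figures are illustrative assumptions, not values from the patch):

/*
 * Standalone sketch (not kernel code): pages per 256K chunk for an assumed
 * 4K (PAGE_SHIFT 12) and 64K (PAGE_SHIFT 16) page size.
 */
#include <stdio.h>

#define KVM_CMA_CHUNK_ORDER 18

int main(void)
{
	unsigned int shifts[] = { 12, 16 };	/* assumed 4K and 64K page sizes */

	printf("chunk size: %lu bytes\n", 1UL << KVM_CMA_CHUNK_ORDER);
	for (unsigned int i = 0; i < 2; i++)
		printf("PAGE_SHIFT %u -> %lu pages per chunk\n", shifts[i],
		       1UL << (KVM_CMA_CHUNK_ORDER - shifts[i]));
	return 0;
}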