aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLi Zhong <zhong@linux.vnet.ibm.com>2014-06-11 04:23:36 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2014-08-05 02:34:10 -0400
commitbd8cb03dbe77a529945aa270a18c1ba074f729c6 (patch)
tree0821484fbbca9af958605276570ca76465658d32
parenteeb03a6eaa02e9171f91e385c52a69b159fc6117 (diff)
powerpc: implement vmemmap_list_free()
This patch implements vmemmap_list_free() for vmemmap_free(). The freed entries will be removed from vmemmap_list and form a freed list, with next as the header. The next position in the last allocated page is kept at the list tail. When allocating, if there are freed entries left, take one from the freed list; if no freed entries are left, take one from the last allocated page as before. With this change, realmode_pfn_to_page() also needs to be changed to walk all the entries in the vmemmap_list, as the virt_addr of the entries might not be stored in order anymore. This helps to reuse the memory when repeatedly doing memory hot-plug/remove operations, but does not reclaim the pages already allocated, so the memory usage will only increase, but will not exceed the value for the largest memory configuration. Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com> Cc: Nathan Fontenot <nfont@linux.vnet.ibm.com> Acked-by: Nathan Fontenot <nfont@linux.vnet.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--arch/powerpc/mm/init_64.c62
1 file changed, 52 insertions, 10 deletions
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e3734edffa69..fa5d28b4e726 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -226,14 +226,24 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
226#endif /* CONFIG_PPC_BOOK3E */ 226#endif /* CONFIG_PPC_BOOK3E */
227 227
228struct vmemmap_backing *vmemmap_list; 228struct vmemmap_backing *vmemmap_list;
229static struct vmemmap_backing *next;
230static int num_left;
231static int num_freed;
229 232
230static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node) 233static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
231{ 234{
232 static struct vmemmap_backing *next; 235 struct vmemmap_backing *vmem_back;
233 static int num_left; 236 /* get from freed entries first */
237 if (num_freed) {
238 num_freed--;
239 vmem_back = next;
240 next = next->list;
241
242 return vmem_back;
243 }
234 244
235 /* allocate a page when required and hand out chunks */ 245 /* allocate a page when required and hand out chunks */
236 if (!next || !num_left) { 246 if (!num_left) {
237 next = vmemmap_alloc_block(PAGE_SIZE, node); 247 next = vmemmap_alloc_block(PAGE_SIZE, node);
238 if (unlikely(!next)) { 248 if (unlikely(!next)) {
239 WARN_ON(1); 249 WARN_ON(1);
@@ -266,6 +276,38 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
266 vmemmap_list = vmem_back; 276 vmemmap_list = vmem_back;
267} 277}
268 278
279static unsigned long vmemmap_list_free(unsigned long start)
280{
281 struct vmemmap_backing *vmem_back, *vmem_back_prev;
282
283 vmem_back_prev = vmem_back = vmemmap_list;
284
285 /* look for it with prev pointer recorded */
286 for (; vmem_back; vmem_back = vmem_back->list) {
287 if (vmem_back->virt_addr == start)
288 break;
289 vmem_back_prev = vmem_back;
290 }
291
292 if (unlikely(!vmem_back)) {
293 WARN_ON(1);
294 return 0;
295 }
296
297 /* remove it from vmemmap_list */
298 if (vmem_back == vmemmap_list) /* remove head */
299 vmemmap_list = vmem_back->list;
300 else
301 vmem_back_prev->list = vmem_back->list;
302
303 /* next point to this freed entry */
304 vmem_back->list = next;
305 next = vmem_back;
306 num_freed++;
307
308 return vmem_back->phys;
309}
310
269int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) 311int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
270{ 312{
271 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; 313 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
@@ -331,16 +373,16 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
331 if (pg_va < vmem_back->virt_addr) 373 if (pg_va < vmem_back->virt_addr)
332 continue; 374 continue;
333 375
334 /* Check that page struct is not split between real pages */ 376 /* After vmemmap_list entry free is possible, need check all */
335 if ((pg_va + sizeof(struct page)) > 377 if ((pg_va + sizeof(struct page)) <=
336 (vmem_back->virt_addr + page_size)) 378 (vmem_back->virt_addr + page_size)) {
337 return NULL; 379 page = (struct page *) (vmem_back->phys + pg_va -
338
339 page = (struct page *) (vmem_back->phys + pg_va -
340 vmem_back->virt_addr); 380 vmem_back->virt_addr);
341 return page; 381 return page;
382 }
342 } 383 }
343 384
385 /* Probably that page struct is split between real pages */
344 return NULL; 386 return NULL;
345} 387}
346EXPORT_SYMBOL_GPL(realmode_pfn_to_page); 388EXPORT_SYMBOL_GPL(realmode_pfn_to_page);