aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/init_64.c
diff options
context:
space:
mode:
authorMark Nelson <markn@au1.ibm.com>2010-04-21 12:21:03 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2010-05-06 02:49:27 -0400
commit91eea67c6d8704396a98226508c56a8501e141e3 (patch)
tree94d65c6bae001fa3835398056fea936720e86ed7 /arch/powerpc/mm/init_64.c
parent13bb5339966d49942878a46b0a7fda0639d7db5f (diff)
powerpc/mm: Track backing pages allocated by vmemmap_populate()
We need to keep track of the backing pages that get allocated by vmemmap_populate() so that when we use kdump, the dump-capture kernel knows where these pages are. We use a simple linked list of structures that contain the physical address of the backing page and corresponding virtual address to track the backing pages. To save space, we just use a pointer to the next struct vmemmap_backing. We can also do this because we never remove nodes. We call the pointer "list" to be compatible with changes made to the crash utility. vmemmap_populate() is called either at boot-time or on a memory hotplug operation. We don't have to worry about the boot-time calls because they will be inherently single-threaded, and for a memory hotplug operation vmemmap_populate() is called through: sparse_add_one_section() | V kmalloc_section_memmap() | V sparse_mem_map_populate() | V vmemmap_populate() and in sparse_add_one_section() we're protected by pgdat_resize_lock(). So, we don't need a spinlock to protect the vmemmap_list. We allocate space for the vmemmap_backing structs by allocating whole pages in vmemmap_list_alloc() and then handing out chunks of this to vmemmap_list_populate(). This means that we waste at most just under one page, but this keeps the code simple. Signed-off-by: Mark Nelson <markn@au1.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm/init_64.c')
-rw-r--r--arch/powerpc/mm/init_64.c43
1 files changed, 43 insertions, 0 deletions
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d7fa50b09b4a..e267f223fdff 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -252,6 +252,47 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
252} 252}
253#endif /* CONFIG_PPC_BOOK3E */ 253#endif /* CONFIG_PPC_BOOK3E */
254 254
255struct vmemmap_backing *vmemmap_list;
256
257static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
258{
259 static struct vmemmap_backing *next;
260 static int num_left;
261
262 /* allocate a page when required and hand out chunks */
263 if (!next || !num_left) {
264 next = vmemmap_alloc_block(PAGE_SIZE, node);
265 if (unlikely(!next)) {
266 WARN_ON(1);
267 return NULL;
268 }
269 num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
270 }
271
272 num_left--;
273
274 return next++;
275}
276
277static __meminit void vmemmap_list_populate(unsigned long phys,
278 unsigned long start,
279 int node)
280{
281 struct vmemmap_backing *vmem_back;
282
283 vmem_back = vmemmap_list_alloc(node);
284 if (unlikely(!vmem_back)) {
285 WARN_ON(1);
286 return;
287 }
288
289 vmem_back->phys = phys;
290 vmem_back->virt_addr = start;
291 vmem_back->list = vmemmap_list;
292
293 vmemmap_list = vmem_back;
294}
295
255int __meminit vmemmap_populate(struct page *start_page, 296int __meminit vmemmap_populate(struct page *start_page,
256 unsigned long nr_pages, int node) 297 unsigned long nr_pages, int node)
257{ 298{
@@ -276,6 +317,8 @@ int __meminit vmemmap_populate(struct page *start_page,
276 if (!p) 317 if (!p)
277 return -ENOMEM; 318 return -ENOMEM;
278 319
320 vmemmap_list_populate(__pa(p), start, node);
321
279 pr_debug(" * %016lx..%016lx allocated at %p\n", 322 pr_debug(" * %016lx..%016lx allocated at %p\n",
280 start, start + page_size, p); 323 start, start + page_size, p);
281 324