author     Mark Nelson <markn@au1.ibm.com>                      2010-04-21 12:21:03 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>    2010-05-06 02:49:27 -0400
commit     91eea67c6d8704396a98226508c56a8501e141e3
tree       94d65c6bae001fa3835398056fea936720e86ed7 /arch
parent     13bb5339966d49942878a46b0a7fda0639d7db5f
powerpc/mm: Track backing pages allocated by vmemmap_populate()
We need to keep track of the backing pages that get allocated by
vmemmap_populate() so that when we use kdump, the dump-capture kernel knows
where these pages are.
To track the backing pages we use a simple linked list of structures,
each holding the physical address of a backing page and the
corresponding virtual address.
To save space we store only a pointer to the next struct
vmemmap_backing; we can get away with this because nodes are never
removed. The pointer is named "list" to stay compatible with changes
made to the crash utility.
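For illustration, a minimal userspace model of the structure and of a
walk over the chain (a sketch, not kernel code: printf() stands in for
whatever the dump-capture side actually does, and walk_backing() is a
hypothetical helper, not part of this patch):

	#include <stdio.h>

	struct vmemmap_backing {
		struct vmemmap_backing *list;	/* next node; nodes are never removed */
		unsigned long phys;		/* physical address of the backing page */
		unsigned long virt_addr;	/* virtual address it backs */
	};

	/* New entries are prepended at the head, so the walk sees the
	 * most recently added backing page first. */
	static void walk_backing(const struct vmemmap_backing *head)
	{
		const struct vmemmap_backing *vb;

		for (vb = head; vb; vb = vb->list)
			printf("virt %#lx -> phys %#lx\n",
			       vb->virt_addr, vb->phys);
	}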
vmemmap_populate() is called either at boot-time or on a memory hotplug
operation. We don't have to worry about the boot-time calls because they
will be inherently single-threaded, and for a memory hotplug operation
vmemmap_populate() is called through:
   sparse_add_one_section()
            |
            V
   kmalloc_section_memmap()
            |
            V
   sparse_mem_map_populate()
            |
            V
   vmemmap_populate()
and in sparse_add_one_section() we're protected by pgdat_resize_lock().
So, we don't need a spinlock to protect the vmemmap_list.
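In outline (a hypothetical sketch of the hotplug path, not the actual
mm/sparse.c source; the lock placement is taken from the description
above), the guarantee looks like:

	/* hotplug path: at most one writer reaches vmemmap_populate() */
	pgdat_resize_lock(pgdat, &flags);
	memmap = kmalloc_section_memmap(...);	/* ends up in vmemmap_populate() */
	/* ... install the new section ... */
	pgdat_resize_unlock(pgdat, &flags);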
We allocate space for the vmemmap_backing structs by allocating whole pages
in vmemmap_list_alloc() and then handing out chunks of this to
vmemmap_list_populate().
This means that we waste at most just under one page, but it keeps the
code simple.
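For example, assuming no structure padding, struct vmemmap_backing is
24 bytes on a 64-bit kernel (three 8-byte fields), so one 4K page
yields 4096 / 24 = 170 entries and one 64K page yields 2730; only the
unused tail of the last page allocated is ever wasted.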
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h |  6 +
-rw-r--r--  arch/powerpc/mm/init_64.c             | 43 +
2 files changed, 49 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 605f5c5398d1..292725cec2e3 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,6 +11,12 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
+struct vmemmap_backing {
+	struct vmemmap_backing *list;
+	unsigned long phys;
+	unsigned long virt_addr;
+};
+
 /*
  * Functions that deal with pagetables that could be at any level of
  * the table need to be passed an "index_size" so they know how to
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d7fa50b09b4a..e267f223fdff 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -252,6 +252,47 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 }
 #endif /* CONFIG_PPC_BOOK3E */
 
+struct vmemmap_backing *vmemmap_list;
+
+static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
+{
+	static struct vmemmap_backing *next;
+	static int num_left;
+
+	/* allocate a page when required and hand out chunks */
+	if (!next || !num_left) {
+		next = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (unlikely(!next)) {
+			WARN_ON(1);
+			return NULL;
+		}
+		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
+	}
+
+	num_left--;
+
+	return next++;
+}
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+					    unsigned long start,
+					    int node)
+{
+	struct vmemmap_backing *vmem_back;
+
+	vmem_back = vmemmap_list_alloc(node);
+	if (unlikely(!vmem_back)) {
+		WARN_ON(1);
+		return;
+	}
+
+	vmem_back->phys = phys;
+	vmem_back->virt_addr = start;
+	vmem_back->list = vmemmap_list;
+
+	vmemmap_list = vmem_back;
+}
+
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
@@ -276,6 +317,8 @@ int __meminit vmemmap_populate(struct page *start_page,
 		if (!p)
 			return -ENOMEM;
 
+		vmemmap_list_populate(__pa(p), start, node);
+
 		pr_debug("  * %016lx..%016lx allocated at %p\n",
 			 start, start + page_size, p);
 