aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2013-10-11 03:23:53 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2013-10-11 03:23:53 -0400
commit3ad26e5c4459d3793ad65bc8929037c70515df83 (patch)
tree434327df7942878383e372988eb5f3bccb25de12 /arch/powerpc/mm
parent5293bf97a27e1be8ac6096aa198ff6a9e3e6837c (diff)
parent18461960cbf50bf345ef0667d45d5f64de8fb893 (diff)
Merge branch 'for-kvm' into next
Topic branch for commits that the KVM tree might want to pull in separately. Hand merged a few files due to conflicts with the LE stuff Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--arch/powerpc/mm/init_64.c51
1 files changed, 50 insertions, 1 deletions
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 8ed035d2edb5..e3734edffa69 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -304,5 +304,54 @@ void register_page_bootmem_memmap(unsigned long section_nr,
304 struct page *start_page, unsigned long size) 304 struct page *start_page, unsigned long size)
305{ 305{
306} 306}
307#endif /* CONFIG_SPARSEMEM_VMEMMAP */
308 307
/*
 * We do not have access to the sparsemem vmemmap, so we fallback to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM continuously (they
 * are in virtual address space which is not available in the real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	/* Size of one vmemmap backing block, taken from the MMU page-size
	 * table for the vmemmap mapping. */
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	/* Virtual address of the struct page we are being asked about. */
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	/* Linear walk of the backing list; the first entry whose virt_addr
	 * is <= pg_va is taken as the backing block for this struct page.
	 * NOTE(review): this only finds the right block if the list is
	 * ordered by descending virt_addr — confirm against the code that
	 * populates vmemmap_list. */
	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Check that page struct is not split between real pages:
		 * a struct page straddling two backing blocks cannot be
		 * safely accessed in real mode, so give up. */
		if ((pg_va + sizeof(struct page)) >
				(vmem_back->virt_addr + page_size))
			return NULL;

		/* Translate the virtual struct page address to its physical
		 * counterpart using this block's phys/virt offset. */
		page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
		return page;
	}

	/* pfn not covered by any vmemmap backing block. */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
347
348#elif defined(CONFIG_FLATMEM)
349
/*
 * With CONFIG_FLATMEM the mem_map array is a single linear table, so the
 * ordinary pfn_to_page() arithmetic is already usable here; no backing-block
 * walk is required for this configuration.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
356
357#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */