author	Jon Tollefson <kniht@linux.vnet.ibm.com>	2008-07-24 00:27:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-24 13:47:19 -0400
commit	658013e93eb70494f7300bc90457b09a807232a4 (patch)
tree	d03a23ac4fff0b35138b5d8abb761e9fad85b09c /arch/powerpc
parent	ec4b2c0c8312d1118c2acd00c89988ecf955d5cc (diff)
powerpc: scan device tree for gigantic pages
The 16G huge pages have to be reserved in the HMC prior to boot. The locations of the pages are placed in the device tree. This patch adds code to scan the device tree during very early boot and save these page locations until hugetlbfs is ready for them.

Acked-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Jon Tollefson <kniht@linux.vnet.ibm.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
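For context, the reservation arithmetic the patch performs per device-tree "memory" node is small: the "ibm,expected#pages" property carries the log base 2 of the number of huge pages the block backs, and "reg" carries the block's base address and size; a 16G block is then reserved as size * pages. The stand-alone C sketch below only illustrates that arithmetic outside the kernel; the property names come from the patch, but the concrete values and the main() harness are hypothetical, and a 64-bit unsigned long (as on ppc64) is assumed.

/*
 * Minimal user-space sketch (not kernel code) of the per-node arithmetic:
 * "ibm,expected#pages" is log2 of the huge-page count, "reg" is (base, size).
 * Values below are illustrative assumptions only.
 */
#include <stdio.h>

#define KB (1024UL)
#define MB (1024UL * KB)
#define GB (1024UL * MB)

int main(void)
{
	unsigned long reg[2] = { 0x100000000UL, 16 * GB }; /* hypothetical "reg": base, size */
	unsigned int log2_pages = 2;                       /* hypothetical "ibm,expected#pages" */
	unsigned int expected_pages = 1U << log2_pages;    /* 2^log2_pages huge pages */

	if (reg[1] != 16 * GB)
		return 0;	/* only 16G blocks are of interest */

	/* The patch reserves block_size * expected_pages and records each page address. */
	printf("reserve 0x%lx bytes at 0x%lx for %u huge pages\n",
	       reg[1] * expected_pages, reg[0], expected_pages);
	return 0;
}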
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	44
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	16
2 files changed, 59 insertions, 1 deletion
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8d3b58ebd38e..ae4c717243a5 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -68,6 +68,7 @@
 
 #define KB (1024)
 #define MB (1024*KB)
+#define GB (1024L*MB)
 
 /*
  * Note: pte --> Linux PTE
@@ -329,6 +330,44 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	return 0;
 }
 
+/* Scan for 16G memory blocks that have been set aside for huge pages
+ * and reserve those blocks for 16G huge pages.
+ */
+static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
+					const char *uname, int depth,
+					void *data) {
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	unsigned long *addr_prop;
+	u32 *page_count_prop;
+	unsigned int expected_pages;
+	long unsigned int phys_addr;
+	long unsigned int block_size;
+
+	/* We are scanning "memory" nodes only */
+	if (type == NULL || strcmp(type, "memory") != 0)
+		return 0;
+
+	/* This property is the log base 2 of the number of virtual pages that
+	 * will represent this memory block. */
+	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
+	if (page_count_prop == NULL)
+		return 0;
+	expected_pages = (1 << page_count_prop[0]);
+	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
+	if (addr_prop == NULL)
+		return 0;
+	phys_addr = addr_prop[0];
+	block_size = addr_prop[1];
+	if (block_size != (16 * GB))
+		return 0;
+	printk(KERN_INFO "Huge page(16GB) memory: "
+			"addr = 0x%lX size = 0x%lX pages = %d\n",
+			phys_addr, block_size, expected_pages);
+	lmb_reserve(phys_addr, block_size * expected_pages);
+	add_gpage(phys_addr, block_size, expected_pages);
+	return 0;
+}
+
 static void __init htab_init_page_sizes(void)
 {
 	int rc;
@@ -418,7 +457,10 @@ static void __init htab_init_page_sizes(void)
 		    );
 
 #ifdef CONFIG_HUGETLB_PAGE
-	/* Init large page size. Currently, we pick 16M or 1M depending
+	/* Reserve 16G huge page memory sections for huge pages */
+	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+
+	/* Init large page size. Currently, we pick 16M or 1M depending
 	 * on what is available
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5df82186fc93..e2a650a9e533 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -110,6 +110,22 @@ pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
 }
 #endif
 
+/* Build list of addresses of gigantic pages. This function is used in early
+ * boot before the buddy or bootmem allocator is setup.
+ */
+void add_gpage(unsigned long addr, unsigned long page_size,
+	unsigned long number_of_pages)
+{
+	if (!addr)
+		return;
+	while (number_of_pages > 0) {
+		gpage_freearray[nr_gpages] = addr;
+		nr_gpages++;
+		number_of_pages--;
+		addr += page_size;
+	}
+}
+
 /* Moves the gigantic page addresses from the temporary list to the
  * huge_boot_pages list. */
 int alloc_bootmem_huge_page(struct hstate *h)