diff options
author | Andi Kleen <ak@suse.de> | 2008-07-24 00:27:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-24 13:47:18 -0400 |
commit | 8faa8b077b2cdc4e4646842fe50b07840955a013 (patch) | |
tree | 9c55a56ad5bb36219c8cf030329e67acd1b7e2e0 /mm/hugetlb.c | |
parent | aa888a74977a8f2120ae9332376e179c39a6b07d (diff) |
hugetlb: support boot-time allocation of different hugepage sizes
Make some infrastructure changes to allow boot-time allocation of
different hugepage sizes.
- move all basic hstate initialisation into hugetlb_add_hstate
- create a new function hugetlb_hstate_alloc_pages() to do the
actual initial page allocations. Call this function early in
order to allocate giant pages from bootmem.
- Check for multiple hugepages= parameters
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Acked-by: Andrew Hastings <abh@cray.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 39 |
1 file changed, 30 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1a6fe87555b2..243a8684d180 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -981,15 +981,10 @@ static void __init gather_bootmem_prealloc(void) | |||
981 | } | 981 | } |
982 | } | 982 | } |
983 | 983 | ||
984 | static void __init hugetlb_init_one_hstate(struct hstate *h) | 984 | static void __init hugetlb_hstate_alloc_pages(struct hstate *h) |
985 | { | 985 | { |
986 | unsigned long i; | 986 | unsigned long i; |
987 | 987 | ||
988 | for (i = 0; i < MAX_NUMNODES; ++i) | ||
989 | INIT_LIST_HEAD(&h->hugepage_freelists[i]); | ||
990 | |||
991 | h->hugetlb_next_nid = first_node(node_online_map); | ||
992 | |||
993 | for (i = 0; i < h->max_huge_pages; ++i) { | 988 | for (i = 0; i < h->max_huge_pages; ++i) { |
994 | if (h->order >= MAX_ORDER) { | 989 | if (h->order >= MAX_ORDER) { |
995 | if (!alloc_bootmem_huge_page(h)) | 990 | if (!alloc_bootmem_huge_page(h)) |
@@ -997,7 +992,7 @@ static void __init hugetlb_init_one_hstate(struct hstate *h) | |||
997 | } else if (!alloc_fresh_huge_page(h)) | 992 | } else if (!alloc_fresh_huge_page(h)) |
998 | break; | 993 | break; |
999 | } | 994 | } |
1000 | h->max_huge_pages = h->free_huge_pages = h->nr_huge_pages = i; | 995 | h->max_huge_pages = i; |
1001 | } | 996 | } |
1002 | 997 | ||
1003 | static void __init hugetlb_init_hstates(void) | 998 | static void __init hugetlb_init_hstates(void) |
@@ -1005,7 +1000,9 @@ static void __init hugetlb_init_hstates(void) | |||
1005 | struct hstate *h; | 1000 | struct hstate *h; |
1006 | 1001 | ||
1007 | for_each_hstate(h) { | 1002 | for_each_hstate(h) { |
1008 | hugetlb_init_one_hstate(h); | 1003 | /* oversize hugepages were init'ed in early boot */ |
1004 | if (h->order < MAX_ORDER) | ||
1005 | hugetlb_hstate_alloc_pages(h); | ||
1009 | } | 1006 | } |
1010 | } | 1007 | } |
1011 | 1008 | ||
@@ -1301,6 +1298,8 @@ module_init(hugetlb_init); | |||
1301 | void __init hugetlb_add_hstate(unsigned order) | 1298 | void __init hugetlb_add_hstate(unsigned order) |
1302 | { | 1299 | { |
1303 | struct hstate *h; | 1300 | struct hstate *h; |
1301 | unsigned long i; | ||
1302 | |||
1304 | if (size_to_hstate(PAGE_SIZE << order)) { | 1303 | if (size_to_hstate(PAGE_SIZE << order)) { |
1305 | printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); | 1304 | printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); |
1306 | return; | 1305 | return; |
@@ -1310,15 +1309,21 @@ void __init hugetlb_add_hstate(unsigned order) | |||
1310 | h = &hstates[max_hstate++]; | 1309 | h = &hstates[max_hstate++]; |
1311 | h->order = order; | 1310 | h->order = order; |
1312 | h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); | 1311 | h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); |
1312 | h->nr_huge_pages = 0; | ||
1313 | h->free_huge_pages = 0; | ||
1314 | for (i = 0; i < MAX_NUMNODES; ++i) | ||
1315 | INIT_LIST_HEAD(&h->hugepage_freelists[i]); | ||
1316 | h->hugetlb_next_nid = first_node(node_online_map); | ||
1313 | snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", | 1317 | snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", |
1314 | huge_page_size(h)/1024); | 1318 | huge_page_size(h)/1024); |
1315 | hugetlb_init_one_hstate(h); | 1319 | |
1316 | parsed_hstate = h; | 1320 | parsed_hstate = h; |
1317 | } | 1321 | } |
1318 | 1322 | ||
1319 | static int __init hugetlb_setup(char *s) | 1323 | static int __init hugetlb_setup(char *s) |
1320 | { | 1324 | { |
1321 | unsigned long *mhp; | 1325 | unsigned long *mhp; |
1326 | static unsigned long *last_mhp; | ||
1322 | 1327 | ||
1323 | /* | 1328 | /* |
1324 | * !max_hstate means we haven't parsed a hugepagesz= parameter yet, | 1329 | * !max_hstate means we haven't parsed a hugepagesz= parameter yet, |
@@ -1329,9 +1334,25 @@ static int __init hugetlb_setup(char *s) | |||
1329 | else | 1334 | else |
1330 | mhp = &parsed_hstate->max_huge_pages; | 1335 | mhp = &parsed_hstate->max_huge_pages; |
1331 | 1336 | ||
1337 | if (mhp == last_mhp) { | ||
1338 | printk(KERN_WARNING "hugepages= specified twice without " | ||
1339 | "interleaving hugepagesz=, ignoring\n"); | ||
1340 | return 1; | ||
1341 | } | ||
1342 | |||
1332 | if (sscanf(s, "%lu", mhp) <= 0) | 1343 | if (sscanf(s, "%lu", mhp) <= 0) |
1333 | *mhp = 0; | 1344 | *mhp = 0; |
1334 | 1345 | ||
1346 | /* | ||
1347 | * Global state is always initialized later in hugetlb_init. | ||
1348 | * But we need to allocate >= MAX_ORDER hstates here early to still | ||
1349 | * use the bootmem allocator. | ||
1350 | */ | ||
1351 | if (max_hstate && parsed_hstate->order >= MAX_ORDER) | ||
1352 | hugetlb_hstate_alloc_pages(parsed_hstate); | ||
1353 | |||
1354 | last_mhp = mhp; | ||
1355 | |||
1335 | return 1; | 1356 | return 1; |
1336 | } | 1357 | } |
1337 | __setup("hugepages=", hugetlb_setup); | 1358 | __setup("hugepages=", hugetlb_setup); |