 arch/ia64/Kconfig               |  5 +
 arch/ia64/mm/hugetlbpage.c      |  4 +-
 include/linux/mmzone.h          |  4 +-
 include/linux/pageblock-flags.h | 25 +++++++++-
 mm/page_alloc.c                 | 67 ++++++++++++++-------
 5 files changed, 80 insertions(+), 25 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 802b082e216d..f80f5e2aec87 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -54,6 +54,11 @@ config ARCH_HAS_ILOG2_U64
 	bool
 	default n
 
+config HUGETLB_PAGE_SIZE_VARIABLE
+	bool
+	depends on HUGETLB_PAGE
+	default y
+
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index a9ff685aea25..d3ce8f3bcaa6 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -194,6 +194,6 @@ static int __init hugetlb_setup_sz(char *str)
 	 * override here with new page shift.
 	 */
 	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
-	return 1;
+	return 0;
 }
-__setup("hugepagesz=", hugetlb_setup_sz);
+early_param("hugepagesz", hugetlb_setup_sz);
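The switch from __setup() to early_param() makes hugepagesz= available before the zone structures are initialised, which the pageblock sizing below depends on; note that the return convention flips too, since early_param handlers report success with 0 while __setup handlers return 1 to mark an option as consumed. A minimal sketch of that contract, assuming a hypothetical handler (the name and body are illustrative, not part of this patch):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>

    /* Hypothetical handler: runs from parse_early_param(), well before
     * any __setup() handler, so the result can steer zone setup. */
    static int __init example_hugepagesz(char *str)
    {
            unsigned long long size;

            if (!str)
                    return -EINVAL;
            size = memparse(str, &str);     /* accepts suffixes such as 256M */
            /* ... validate size and record the new huge page shift ... */
            (void)size;
            return 0;                       /* 0 means success for early_param */
    }
    early_param("hugepagesz", example_hugepagesz);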
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fef08c6cf75e..87a4045580f2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -235,7 +235,7 @@ struct zone {
 
 #ifndef CONFIG_SPARSEMEM
 	/*
-	 * Flags for a MAX_ORDER_NR_PAGES block. See pageblock-flags.h.
+	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
 	 * In SPARSEMEM, this map is stored in struct mem_section
 	 */
 	unsigned long *pageblock_flags;
@@ -740,7 +740,7 @@ extern struct zone *next_zone(struct zone *zone);
 #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
 
 #define SECTION_BLOCKFLAGS_BITS \
-	((1 << (PFN_SECTION_SHIFT - (MAX_ORDER-1))) * NR_PAGEBLOCK_BITS)
+	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
 
 #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
 #error Allocator MAX_ORDER exceeds SECTION_SIZE
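SECTION_BLOCKFLAGS_BITS must now size the per-section flag bitmap from pageblock_order, which may be a runtime variable, rather than the compile-time MAX_ORDER-1. A standalone check of the arithmetic, with both constants assumed for the example rather than taken from any real configuration:

    #include <stdio.h>

    #define NR_PAGEBLOCK_BITS 4     /* assumed */
    #define PFN_SECTION_SHIFT 16    /* assumed: 2^16 pages per section */

    int main(void)
    {
            int pageblock_order = 10;       /* e.g. MAX_ORDER-1 without hugetlb */
            unsigned long bits = (1UL << (PFN_SECTION_SHIFT - pageblock_order))
                                 * NR_PAGEBLOCK_BITS;
            /* 2^(16-10) = 64 pageblocks per section, 4 flag bits each */
            printf("%lu flag bits per section\n", bits);    /* prints 256 */
            return 0;
    }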
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 5456da6b4ade..9ea130829fb4 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -1,6 +1,6 @@
 /*
  * Macros for manipulating and testing flags related to a
- * MAX_ORDER_NR_PAGES block of pages.
+ * pageblock_nr_pages number of pages.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -35,6 +35,29 @@ enum pageblock_bits {
 	NR_PAGEBLOCK_BITS
 };
 
+#ifdef CONFIG_HUGETLB_PAGE
+
+#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+/* Huge page sizes are variable */
+extern int pageblock_order;
+
+#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+/* Huge pages are a constant size */
+#define pageblock_order		HUGETLB_PAGE_ORDER
+
+#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+#else /* CONFIG_HUGETLB_PAGE */
+
+/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
+#define pageblock_order		(MAX_ORDER-1)
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#define pageblock_nr_pages	(1UL << pageblock_order)
+
 /* Forward declaration */
 struct page;
 
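pageblock_order now resolves three ways: a boot-time variable when huge page sizes vary (ia64), HUGETLB_PAGE_ORDER when they are fixed, and MAX_ORDER-1 when hugetlb is not configured. A userspace sketch of the resulting block sizes; PAGE_SHIFT, HUGETLB_PAGE_ORDER and MAX_ORDER below are assumed example values:

    #include <stdio.h>

    #define PAGE_SHIFT         12   /* 4 KiB base pages, assumed */
    #define HUGETLB_PAGE_ORDER  9   /* 2 MiB huge pages, assumed */
    #define MAX_ORDER          11

    int main(void)
    {
            /* fixed hugetlb size -> HUGETLB_PAGE_ORDER; no hugetlb -> MAX_ORDER-1 */
            int orders[2] = { HUGETLB_PAGE_ORDER, MAX_ORDER - 1 };
            for (int i = 0; i < 2; i++) {
                    unsigned long nr_pages = 1UL << orders[i];
                    printf("order %2d -> %4lu pages -> %4lu KiB per block\n",
                           orders[i], nr_pages, (nr_pages << PAGE_SHIFT) >> 10);
            }
            return 0;
    }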
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 942498fba942..b9bc7369c48d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -67,6 +67,10 @@ unsigned long totalreserve_pages __read_mostly;
 long nr_swap_pages;
 int percpu_pagelist_fraction;
 
+#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+int pageblock_order __read_mostly;
+#endif
+
 static void __free_pages_ok(struct page *page, unsigned int order);
 
 /*
@@ -709,7 +713,7 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
 
 /*
  * Move the free pages in a range to the free lists of the requested type.
- * Note that start_page and end_pages are not aligned in a MAX_ORDER_NR_PAGES
+ * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
 int move_freepages(struct zone *zone,
@@ -759,10 +763,10 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
 	struct page *start_page, *end_page;
 
 	start_pfn = page_to_pfn(page);
-	start_pfn = start_pfn & ~(MAX_ORDER_NR_PAGES-1);
+	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
 	start_page = pfn_to_page(start_pfn);
-	end_page = start_page + MAX_ORDER_NR_PAGES - 1;
-	end_pfn = start_pfn + MAX_ORDER_NR_PAGES - 1;
+	end_page = start_page + pageblock_nr_pages - 1;
+	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
 	if (start_pfn < zone->zone_start_pfn)
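move_freepages_block() now rounds down to a pageblock rather than a MAX_ORDER block. The mask arithmetic, checked standalone (pageblock_order = 9 is an assumed example value):

    #include <stdio.h>

    int main(void)
    {
            int pageblock_order = 9;        /* assumed */
            unsigned long pageblock_nr_pages = 1UL << pageblock_order;
            unsigned long pfn = 123456;
            unsigned long start_pfn = pfn & ~(pageblock_nr_pages - 1);
            unsigned long end_pfn = start_pfn + pageblock_nr_pages - 1;
            /* 123456 rounds down to 123392; the block spans 512 pfns */
            printf("pfn %lu -> block [%lu, %lu]\n", pfn, start_pfn, end_pfn);
            return 0;
    }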
@@ -826,14 +830,14 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 			 * back for a reclaimable kernel allocation, be more
 			 * agressive about taking ownership of free pages
 			 */
-			if (unlikely(current_order >= MAX_ORDER / 2) ||
+			if (unlikely(current_order >= (pageblock_order >> 1)) ||
 					start_migratetype == MIGRATE_RECLAIMABLE) {
 				unsigned long pages;
 				pages = move_freepages_block(zone, page,
 								start_migratetype);
 
 				/* Claim the whole block if over half of it is free */
-				if (pages >= (1 << (MAX_ORDER-2)))
+				if (pages >= (1 << (pageblock_order-1)))
 					set_pageblock_migratetype(page,
 								start_migratetype);
 
@@ -846,7 +850,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 			__mod_zone_page_state(zone, NR_FREE_PAGES,
 						-(1UL << order));
 
-			if (current_order == MAX_ORDER - 1)
+			if (current_order == pageblock_order)
 				set_pageblock_migratetype(page,
 							start_migratetype);
 
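The fallback path claims a whole pageblock once at least half of it has been moved, i.e. pages >= 2^(pageblock_order-1), and stamps the block's migratetype when stealing at exactly pageblock_order. The threshold, worked standalone with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
            int pageblock_order = 9;                        /* assumed */
            unsigned long block = 1UL << pageblock_order;   /* 512 pages */
            unsigned long threshold = 1UL << (pageblock_order - 1);
            unsigned long moved = 300;  /* as move_freepages_block() might return */
            printf("moved %lu of %lu pages: %s\n", moved, block,
                   moved >= threshold ? "claim whole block" : "leave migratetype");
            return 0;
    }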
@@ -2385,7 +2389,7 @@ void build_all_zonelists(void)
 	 * made on memory-hotadd so a system can start with mobility
 	 * disabled and enable it later
 	 */
-	if (vm_total_pages < (MAX_ORDER_NR_PAGES * MIGRATE_TYPES))
+	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
 		page_group_by_mobility_disabled = 1;
 	else
 		page_group_by_mobility_disabled = 0;
@@ -2470,7 +2474,7 @@ static inline unsigned long wait_table_bits(unsigned long size)
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
 /*
- * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
+ * Mark a number of pageblocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
  * reserve will tend to store contiguous free pages. Setting min_free_kbytes
  * higher will lead to a bigger reserve which will get freed as contiguous
@@ -2485,9 +2489,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	/* Get the start pfn, end pfn and the number of blocks to reserve */
 	start_pfn = zone->zone_start_pfn;
 	end_pfn = start_pfn + zone->spanned_pages;
-	reserve = roundup(zone->pages_min, MAX_ORDER_NR_PAGES) >> (MAX_ORDER-1);
+	reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
+							pageblock_order;
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) {
+	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 		if (!pfn_valid(pfn))
 			continue;
 		page = pfn_to_page(pfn);
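The reserve is zone->pages_min rounded up to whole pageblocks and then converted from pages to blocks. Worked standalone; pages_min and pageblock_order are assumed numbers:

    #include <stdio.h>

    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
            int pageblock_order = 9;                /* assumed */
            unsigned long pageblock_nr_pages = 1UL << pageblock_order;
            unsigned long pages_min = 1379;         /* assumed zone->pages_min */
            unsigned long reserve = roundup(pages_min, pageblock_nr_pages)
                                    >> pageblock_order;
            /* 1379 rounds up to 1536 pages, i.e. 3 blocks of 512 */
            printf("reserve %lu pageblocks\n", reserve);    /* prints 3 */
            return 0;
    }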
@@ -2562,7 +2567,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * the start are marked MIGRATE_RESERVE by
 		 * setup_zone_migrate_reserve()
 		 */
-		if ((pfn & (MAX_ORDER_NR_PAGES-1)))
+		if ((pfn & (pageblock_nr_pages-1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
 		INIT_LIST_HEAD(&page->lru);
@@ -3266,8 +3271,8 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 #ifndef CONFIG_SPARSEMEM
 /*
  * Calculate the size of the zone->blockflags rounded to an unsigned long
- * Start by making sure zonesize is a multiple of MAX_ORDER-1 by rounding up
- * Then figure 1 NR_PAGEBLOCK_BITS worth of bits per MAX_ORDER-1, finally
+ * Start by making sure zonesize is a multiple of pageblock_order by rounding
+ * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
  * round what is now in bits to nearest long in bits, then return it in
  * bytes.
  */
@@ -3275,8 +3280,8 @@ static unsigned long __init usemap_size(unsigned long zonesize)
 {
 	unsigned long usemapsize;
 
-	usemapsize = roundup(zonesize, MAX_ORDER_NR_PAGES);
-	usemapsize = usemapsize >> (MAX_ORDER-1);
+	usemapsize = roundup(zonesize, pageblock_nr_pages);
+	usemapsize = usemapsize >> pageblock_order;
 	usemapsize *= NR_PAGEBLOCK_BITS;
 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
 
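The same calculation, runnable in userspace; zonesize, NR_PAGEBLOCK_BITS and pageblock_order are assumed example values:

    #include <stdio.h>

    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
    #define NR_PAGEBLOCK_BITS 4                     /* assumed */

    int main(void)
    {
            int pageblock_order = 9;                /* assumed */
            unsigned long pageblock_nr_pages = 1UL << pageblock_order;
            unsigned long zonesize = 262144;        /* 1 GiB of 4 KiB pages, assumed */
            unsigned long usemapsize;

            usemapsize = roundup(zonesize, pageblock_nr_pages);
            usemapsize >>= pageblock_order;         /* 512 pageblocks */
            usemapsize *= NR_PAGEBLOCK_BITS;        /* 2048 flag bits */
            usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
            printf("%lu bytes\n", usemapsize / 8);  /* prints 256 */
            return 0;
    }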
@@ -3298,6 +3303,27 @@ static void inline setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
+static inline void __init set_pageblock_order(unsigned int order)
+{
+	/* Check that pageblock_nr_pages has not already been setup */
+	if (pageblock_order)
+		return;
+
+	/*
+	 * Assume the largest contiguous order of interest is a huge page.
+	 * This value may be variable depending on boot parameters on IA64
+	 */
+	pageblock_order = order;
+}
+#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+/* Defined this way to avoid accidentally referencing HUGETLB_PAGE_ORDER */
+#define set_pageblock_order(x)	do {} while (0)
+
+#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
 /*
  * Set up the zone data structures:
  *   - mark all pages reserved
@@ -3378,6 +3404,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
+		set_pageblock_order(HUGETLB_PAGE_ORDER);
 		setup_usemap(pgdat, zone, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
@@ -4375,15 +4402,15 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 {
 #ifdef CONFIG_SPARSEMEM
 	pfn &= (PAGES_PER_SECTION-1);
-	return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
+	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
 	pfn = pfn - zone->zone_start_pfn;
-	return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
+	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }
 
 /**
- * get_pageblock_flags_group - Return the requested group of flags for the MAX_ORDER_NR_PAGES block of pages
+ * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
  * @page: The page within the block of interest
  * @start_bitidx: The first bit of interest to retrieve
  * @end_bitidx: The last bit of interest
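pfn_to_bitidx() maps a pfn to the first flag bit of its pageblock. A standalone check of the non-SPARSEMEM branch, with every constant assumed for the example:

    #include <stdio.h>

    #define NR_PAGEBLOCK_BITS 4                     /* assumed */

    int main(void)
    {
            int pageblock_order = 9;                /* assumed */
            unsigned long zone_start_pfn = 1024;    /* assumed */
            unsigned long pfn = 123456;
            unsigned long bitidx = ((pfn - zone_start_pfn) >> pageblock_order)
                                   * NR_PAGEBLOCK_BITS;
            /* (123456 - 1024) >> 9 = 239 blocks in, 4 bits per block */
            printf("bitidx %lu\n", bitidx);         /* prints 956 */
            return 0;
    }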
@@ -4411,7 +4438,7 @@ unsigned long get_pageblock_flags_group(struct page *page,
 }
 
 /**
- * set_pageblock_flags_group - Set the requested group of flags for a MAX_ORDER_NR_PAGES block of pages
+ * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
  * @page: The page within the block of interest
  * @start_bitidx: The first bit of interest
  * @end_bitidx: The last bit of interest