Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  42
1 file changed, 28 insertions, 14 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97d6827c7d66..253a450c400d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -232,11 +233,13 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * zone->lock is already acquired when we use these.
  * So, we don't need atomic page->flags operations here.
  */
-static inline unsigned long page_order(struct page *page) {
+static inline unsigned long page_order(struct page *page)
+{
         return page_private(page);
 }
 
-static inline void set_page_order(struct page *page, int order) {
+static inline void set_page_order(struct page *page, int order)
+{
         set_page_private(page, order);
         __SetPageBuddy(page);
 }
@@ -299,9 +302,9 @@ static inline int page_is_buddy(struct page *page, int order)
 
         if (PageBuddy(page) && page_order(page) == order) {
                 BUG_ON(page_count(page) != 0);
-               return 1;
-       }
-       return 0;
+                return 1;
+        }
+        return 0;
 }
 
 /*
@@ -948,7 +951,7 @@ restart:
                 goto got_pg;
 
         do {
-                if (cpuset_zone_allowed(*z, gfp_mask))
+                if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
                         wakeup_kswapd(*z, order);
         } while (*(++z));
 
@@ -967,7 +970,8 @@ restart:
                 alloc_flags |= ALLOC_HARDER;
         if (gfp_mask & __GFP_HIGH)
                 alloc_flags |= ALLOC_HIGH;
-        alloc_flags |= ALLOC_CPUSET;
+        if (wait)
+                alloc_flags |= ALLOC_CPUSET;
 
         /*
          * Go through the zonelist again. Let __GFP_HIGH and allocations
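Note: "wait" in this function is gfp_mask & __GFP_WAIT, so after this hunk ALLOC_CPUSET (cpuset enforcement at the watermark check) is only applied to allocations that may sleep; GFP_ATOMIC-style requests, which can arrive from interrupt context where the current task's cpuset is not meaningful, are no longer rejected on cpuset grounds. A condensed standalone model of the flag setup (the flag values are made up for illustration and the surrounding ALLOC_HARDER logic is omitted):

    #include <stdio.h>

    /* Illustrative values only; the real definitions are in include/linux/gfp.h
     * and mm/page_alloc.c. */
    #define __GFP_WAIT      0x10u
    #define __GFP_HIGH      0x20u
    #define ALLOC_WMARK_MIN 0x01u
    #define ALLOC_HIGH      0x20u
    #define ALLOC_CPUSET    0x40u

    static unsigned int build_alloc_flags(unsigned int gfp_mask)
    {
            int wait = gfp_mask & __GFP_WAIT;
            unsigned int alloc_flags = ALLOC_WMARK_MIN;

            if (gfp_mask & __GFP_HIGH)
                    alloc_flags |= ALLOC_HIGH;
            if (wait)                       /* the new condition in this hunk */
                    alloc_flags |= ALLOC_CPUSET;
            return alloc_flags;
    }

    int main(void)
    {
            /* atomic-style: no __GFP_WAIT, so cpusets are not enforced here */
            printf("atomic:   %#x\n", build_alloc_flags(__GFP_HIGH));
            /* blocking: __GFP_WAIT set, ALLOC_CPUSET is added */
            printf("blocking: %#x\n", build_alloc_flags(__GFP_WAIT));
            return 0;
    }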
@@ -1960,7 +1964,7 @@ static inline void free_zone_pagesets(int cpu)
         }
 }
 
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
                 unsigned long action,
                 void *hcpu)
 {
@@ -2121,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
         /* ia64 gets its own node_mem_map, before this, without bootmem */
         if (!pgdat->node_mem_map) {
-                unsigned long size;
+                unsigned long size, start, end;
                 struct page *map;
 
-                size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+                /*
+                 * The zone's endpoints aren't required to be MAX_ORDER
+                 * aligned but the node_mem_map endpoints must be in order
+                 * for the buddy allocator to function correctly.
+                 */
+                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+                end = ALIGN(end, MAX_ORDER_NR_PAGES);
+                size = (end - start) * sizeof(struct page);
                 map = alloc_remap(pgdat->node_id, size);
                 if (!map)
                         map = alloc_bootmem_node(pgdat, size);
-                pgdat->node_mem_map = map;
+                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
         }
 #ifdef CONFIG_FLATMEM
         /*
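Note: the new sizing rounds node_start_pfn down, and the end pfn up, to a MAX_ORDER_NR_PAGES boundary so that node_mem_map always covers whole MAX_ORDER blocks even when the node's span is unaligned; the final changed line then offsets the returned map so that indexing by pfn relative to node_start_pfn still lands on the right struct page. A small userspace sketch of the arithmetic (the pfn values and MAX_ORDER_NR_PAGES below are assumed example numbers, not taken from the patch):

    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES 1024UL               /* assumed example value */
    #define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))

    struct page { char pad[32]; };                  /* stand-in for struct page */

    int main(void)
    {
            unsigned long node_start_pfn = 0x100a;  /* deliberately unaligned */
            unsigned long node_spanned_pages = 0x3f0;

            unsigned long start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
            unsigned long end   = ALIGN(node_start_pfn + node_spanned_pages,
                                        MAX_ORDER_NR_PAGES);
            unsigned long size  = (end - start) * sizeof(struct page);

            printf("start pfn %#lx, end pfn %#lx, map bytes %lu\n",
                   start, end, size);
            /* node_mem_map points at the entry for node_start_pfn, not 'start' */
            printf("map offset into array: %lu pages\n",
                   node_start_pfn - start);
            return 0;
    }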
@@ -2564,9 +2576,11 @@ void setup_per_zone_pages_min(void)
         }
 
         for_each_zone(zone) {
-                unsigned long tmp;
+                u64 tmp;
+
                 spin_lock_irqsave(&zone->lru_lock, flags);
-                tmp = (pages_min * zone->present_pages) / lowmem_pages;
+                tmp = (u64)pages_min * zone->present_pages;
+                do_div(tmp, lowmem_pages);
                 if (is_highmem(zone)) {
                         /*
                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
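Note: the old expression multiplied two unsigned longs before dividing, which can overflow on 32-bit hosts with large zones; the new code widens to u64 first and divides with do_div() (hence the <asm/div64.h> include added in the first hunk), which divides a 64-bit dividend by a 32-bit divisor in place. A standalone demonstration of the overflow the change avoids (the page counts are example numbers only):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t pages_min     = 16384;      /* pages reserved per min_free_kbytes */
            uint32_t present_pages = 1048576;    /* a 4 GiB zone of 4 KiB pages */
            uint32_t lowmem_pages  = 219000;     /* example non-highmem page total */

            /* Old calculation: the 32-bit product wraps (2^34 mod 2^32 == 0 here). */
            uint32_t old = (pages_min * present_pages) / lowmem_pages;

            /* New calculation: widen first, then divide (do_div() in the kernel). */
            uint64_t tmp = (uint64_t)pages_min * present_pages;
            uint32_t new = (uint32_t)(tmp / lowmem_pages);

            printf("old (overflowed): %u\n", old);
            printf("new (correct):    %u\n", new);
            return 0;
    }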
@@ -2593,8 +2607,8 @@ void setup_per_zone_pages_min(void)
                         zone->pages_min = tmp;
                 }
 
-                zone->pages_low = zone->pages_min + tmp / 4;
-                zone->pages_high = zone->pages_min + tmp / 2;
+                zone->pages_low = zone->pages_min + (tmp >> 2);
+                zone->pages_high = zone->pages_min + (tmp >> 1);
                 spin_unlock_irqrestore(&zone->lru_lock, flags);
         }
 
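Note: with tmp now a u64, the divisions by 4 and 2 are spelled as right shifts; for unsigned values the results are identical, and the shifts make explicit that no 64-bit division is needed here on 32-bit architectures (a compiler would reduce division by a power of two to a shift anyway, so this is readability and caution rather than a functional change). A one-line sanity check of that equivalence:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t tmp = 78447;   /* example per-zone share of pages_min */
            assert((tmp >> 2) == tmp / 4 && (tmp >> 1) == tmp / 2);
            return 0;
    }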