diff options
author:    Steven Whitehouse <swhiteho@redhat.com>  2006-05-25 12:40:08 -0400
committer: Steven Whitehouse <swhiteho@redhat.com>  2006-05-25 12:40:08 -0400
commit:    c6a756795d5ba0637aae8da89dd11bb7e3a1ee74 (patch)
tree:      1c19f951f2604dbb6b867a6dcdf94d20c204cc5c /mm/page_alloc.c
parent:    382066da251132f768380f4852ed5afb72d88f80 (diff)
parent:    a8bd60705aa17a998516837d9c1e503ad4cbd7fc (diff)

Merge branch 'master'

Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 30 +++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047e..253a450c400d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/mempolicy.h> | 39 | #include <linux/mempolicy.h> |
40 | 40 | ||
41 | #include <asm/tlbflush.h> | 41 | #include <asm/tlbflush.h> |
42 | #include <asm/div64.h> | ||
42 | #include "internal.h" | 43 | #include "internal.h" |
43 | 44 | ||
44 | /* | 45 | /* |
@@ -950,7 +951,7 @@ restart: | |||
950 | goto got_pg; | 951 | goto got_pg; |
951 | 952 | ||
952 | do { | 953 | do { |
953 | if (cpuset_zone_allowed(*z, gfp_mask)) | 954 | if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL)) |
954 | wakeup_kswapd(*z, order); | 955 | wakeup_kswapd(*z, order); |
955 | } while (*(++z)); | 956 | } while (*(++z)); |
956 | 957 | ||
@@ -969,7 +970,8 @@ restart: | |||
969 | alloc_flags |= ALLOC_HARDER; | 970 | alloc_flags |= ALLOC_HARDER; |
970 | if (gfp_mask & __GFP_HIGH) | 971 | if (gfp_mask & __GFP_HIGH) |
971 | alloc_flags |= ALLOC_HIGH; | 972 | alloc_flags |= ALLOC_HIGH; |
972 | alloc_flags |= ALLOC_CPUSET; | 973 | if (wait) |
974 | alloc_flags |= ALLOC_CPUSET; | ||
973 | 975 | ||
974 | /* | 976 | /* |
975 | * Go through the zonelist again. Let __GFP_HIGH and allocations | 977 | * Go through the zonelist again. Let __GFP_HIGH and allocations |
@@ -2123,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat) | |||
2123 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | 2125 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
2124 | /* ia64 gets its own node_mem_map, before this, without bootmem */ | 2126 | /* ia64 gets its own node_mem_map, before this, without bootmem */ |
2125 | if (!pgdat->node_mem_map) { | 2127 | if (!pgdat->node_mem_map) { |
2126 | unsigned long size; | 2128 | unsigned long size, start, end; |
2127 | struct page *map; | 2129 | struct page *map; |
2128 | 2130 | ||
2129 | size = (pgdat->node_spanned_pages + 1) * sizeof(struct page); | 2131 | /* |
2132 | * The zone's endpoints aren't required to be MAX_ORDER | ||
2133 | * aligned but the node_mem_map endpoints must be in order | ||
2134 | * for the buddy allocator to function correctly. | ||
2135 | */ | ||
2136 | start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); | ||
2137 | end = pgdat->node_start_pfn + pgdat->node_spanned_pages; | ||
2138 | end = ALIGN(end, MAX_ORDER_NR_PAGES); | ||
2139 | size = (end - start) * sizeof(struct page); | ||
2130 | map = alloc_remap(pgdat->node_id, size); | 2140 | map = alloc_remap(pgdat->node_id, size); |
2131 | if (!map) | 2141 | if (!map) |
2132 | map = alloc_bootmem_node(pgdat, size); | 2142 | map = alloc_bootmem_node(pgdat, size); |
2133 | pgdat->node_mem_map = map; | 2143 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); |
2134 | } | 2144 | } |
2135 | #ifdef CONFIG_FLATMEM | 2145 | #ifdef CONFIG_FLATMEM |
2136 | /* | 2146 | /* |
@@ -2566,9 +2576,11 @@ void setup_per_zone_pages_min(void) | |||
2566 | } | 2576 | } |
2567 | 2577 | ||
2568 | for_each_zone(zone) { | 2578 | for_each_zone(zone) { |
2569 | unsigned long tmp; | 2579 | u64 tmp; |
2580 | |||
2570 | spin_lock_irqsave(&zone->lru_lock, flags); | 2581 | spin_lock_irqsave(&zone->lru_lock, flags); |
2571 | tmp = (pages_min * zone->present_pages) / lowmem_pages; | 2582 | tmp = (u64)pages_min * zone->present_pages; |
2583 | do_div(tmp, lowmem_pages); | ||
2572 | if (is_highmem(zone)) { | 2584 | if (is_highmem(zone)) { |
2573 | /* | 2585 | /* |
2574 | * __GFP_HIGH and PF_MEMALLOC allocations usually don't | 2586 | * __GFP_HIGH and PF_MEMALLOC allocations usually don't |
@@ -2595,8 +2607,8 @@ void setup_per_zone_pages_min(void) | |||
2595 | zone->pages_min = tmp; | 2607 | zone->pages_min = tmp; |
2596 | } | 2608 | } |
2597 | 2609 | ||
2598 | zone->pages_low = zone->pages_min + tmp / 4; | 2610 | zone->pages_low = zone->pages_min + (tmp >> 2); |
2599 | zone->pages_high = zone->pages_min + tmp / 2; | 2611 | zone->pages_high = zone->pages_min + (tmp >> 1); |
2600 | spin_unlock_irqrestore(&zone->lru_lock, flags); | 2612 | spin_unlock_irqrestore(&zone->lru_lock, flags); |
2601 | } | 2613 | } |
2602 | 2614 | ||