author    Steven Whitehouse <swhiteho@redhat.com>  2006-05-25 12:40:08 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-05-25 12:40:08 -0400
commit    c6a756795d5ba0637aae8da89dd11bb7e3a1ee74
tree      1c19f951f2604dbb6b867a6dcdf94d20c204cc5c /mm
parent    382066da251132f768380f4852ed5afb72d88f80
parent    a8bd60705aa17a998516837d9c1e503ad4cbd7fc
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  30
-rw-r--r--  mm/slab.c        19
-rw-r--r--  mm/sparse.c       9
3 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047..253a450c400 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -950,7 +951,7 @@ restart:
 		goto got_pg;
 
 	do {
-		if (cpuset_zone_allowed(*z, gfp_mask))
+		if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
 			wakeup_kswapd(*z, order);
 	} while (*(++z));
 
@@ -969,7 +970,8 @@ restart:
 		alloc_flags |= ALLOC_HARDER;
 	if (gfp_mask & __GFP_HIGH)
 		alloc_flags |= ALLOC_HIGH;
-	alloc_flags |= ALLOC_CPUSET;
+	if (wait)
+		alloc_flags |= ALLOC_CPUSET;
 
 	/*
 	 * Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -2123,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	/* ia64 gets its own node_mem_map, before this, without bootmem */
 	if (!pgdat->node_mem_map) {
-		unsigned long size;
+		unsigned long size, start, end;
 		struct page *map;
 
-		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+		/*
+		 * The zone's endpoints aren't required to be MAX_ORDER
+		 * aligned but the node_mem_map endpoints must be in order
+		 * for the buddy allocator to function correctly.
+		 */
+		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+		end = ALIGN(end, MAX_ORDER_NR_PAGES);
+		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
 		if (!map)
 			map = alloc_bootmem_node(pgdat, size);
-		pgdat->node_mem_map = map;
+		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
 #ifdef CONFIG_FLATMEM
 	/*
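The new comment carries the reasoning in this hunk: a node's start PFN need not be MAX_ORDER-aligned, so the mem_map is grown outward to the nearest MAX_ORDER boundaries and the node's map pointer is then offset back to its first real page. A minimal userspace sketch of that arithmetic; MAX_ORDER_NR_PAGES and the sample PFN values below are illustrative assumptions, not the kernel's actual configuration:

#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL	/* assumed: 2^(MAX_ORDER - 1) with MAX_ORDER = 11 */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long node_start_pfn = 1500;	/* deliberately unaligned sample */
	unsigned long node_spanned_pages = 3000;
	unsigned long start, end;

	/* Round the start down and the end up to MAX_ORDER boundaries,
	 * mirroring the patched alloc_node_mem_map(). */
	start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	end = ALIGN(node_start_pfn + node_spanned_pages, MAX_ORDER_NR_PAGES);

	/* 1500 rounds down to 1024 and 4500 rounds up to 5120, so the
	 * map covers 4096 pages rather than node_spanned_pages + 1. */
	printf("map spans pfns [%lu, %lu); node offset into map = %lu\n",
	       start, end, node_start_pfn - start);
	return 0;
}

The hunk's final line, map + (pgdat->node_start_pfn - start), is what keeps pfn-to-page lookups a plain offset while the underlying allocation stays buddy-aligned.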
@@ -2566,9 +2576,11 @@ void setup_per_zone_pages_min(void)
 	}
 
 	for_each_zone(zone) {
-		unsigned long tmp;
+		u64 tmp;
+
 		spin_lock_irqsave(&zone->lru_lock, flags);
-		tmp = (pages_min * zone->present_pages) / lowmem_pages;
+		tmp = (u64)pages_min * zone->present_pages;
+		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2607,8 @@ void setup_per_zone_pages_min(void)
 			zone->pages_min = tmp;
 		}
 
-		zone->pages_low = zone->pages_min + tmp / 4;
-		zone->pages_high = zone->pages_min + tmp / 2;
+		zone->pages_low = zone->pages_min + (tmp >> 2);
+		zone->pages_high = zone->pages_min + (tmp >> 1);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 
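The u64/do_div() change in the two hunks above is an overflow fix: on 32-bit configurations, pages_min * zone->present_pages is a 32-bit multiply that can wrap before the divide, and do_div() is used because the kernel cannot rely on libgcc's 64-bit division helpers (the tmp / 4 → tmp >> 2 rewrite avoids 64-bit division for the same reason). A userspace sketch with assumed, but plausible, zone sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed sample values: pages_min * present_pages equals 2^32,
	 * which wraps to 0 in a 32-bit multiply. */
	uint32_t pages_min = 16384;		/* 64 MB worth of 4 KB pages */
	uint32_t present_pages = 262144;	/* a 1 GB zone */
	uint32_t lowmem_pages = 229376;		/* ~896 MB of lowmem */

	/* Old expression: the product wraps before the division. */
	uint32_t wrapped = (pages_min * present_pages) / lowmem_pages;

	/* Patched form: widen first, then divide; in the kernel,
	 * do_div(tmp, lowmem_pages) divides the u64 in place. */
	uint64_t tmp = (uint64_t)pages_min * present_pages;
	uint32_t correct = (uint32_t)(tmp / lowmem_pages);

	printf("wrapped = %u, correct = %u\n", wrapped, correct);	/* 0 vs 18724 */
	return 0;
}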
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e7581..d31a06bfbea 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -700,6 +700,14 @@ static enum {
 	FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
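slab_is_available() gives early boot code a direct test for whether kmalloc() works yet, rather than the system_state == SYSTEM_RUNNING idiom, which flips much later in boot than slab initialisation. A sketch of the intended call pattern; alloc_early_table and its parameters are hypothetical, and the sparse.c hunk below is the real in-tree conversion:

/* Hypothetical early-boot helper: pick whichever allocator is up. */
static void *alloc_early_table(int nid, unsigned long size)
{
	/* Until the slab bootstrap reaches FULL, kmalloc_node() would
	 * touch uninitialised caches, so use the boot allocator. */
	if (slab_is_available())
		return kmalloc_node(size, GFP_KERNEL, nid);
	return alloc_bootmem_node(NODE_DATA(nid), size);
}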
@@ -2192,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
 			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
 	}
 }
 
diff --git a/mm/sparse.c b/mm/sparse.c
index d7c32de99ee..100040c0dfb 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (system_state == SYSTEM_RUNNING)
+	if (slab_is_available())
 		section = kmalloc_node(array_size, GFP_KERNEL, nid);
 	else
 		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
@@ -87,11 +87,8 @@ int __section_nr(struct mem_section* ms)
 	unsigned long root_nr;
 	struct mem_section* root;
 
-	for (root_nr = 0;
-	     root_nr < NR_MEM_SECTIONS;
-	     root_nr += SECTIONS_PER_ROOT) {
-		root = __nr_to_section(root_nr);
-
+	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
+		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
 		if (!root)
 			continue;
 
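The rewritten loop indexes by root rather than by section number: sparsemem keeps mem_section entries in NR_SECTION_ROOTS root arrays of SECTIONS_PER_ROOT entries each, so a root index has to be rescaled by SECTIONS_PER_ROOT before __nr_to_section() can use it. A small userspace sketch of that mapping; the constants are assumptions, since the real values derive from PAGE_SIZE and sizeof(struct mem_section):

#include <stdio.h>

#define SECTIONS_PER_ROOT 256UL	/* assumed */
#define NR_SECTION_ROOTS 16UL	/* assumed: NR_MEM_SECTIONS / SECTIONS_PER_ROOT */

int main(void)
{
	unsigned long root_nr;

	/* Mirror the patched loop: one probe per root, rescaled to the
	 * section number of that root's first entry. */
	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		unsigned long first = root_nr * SECTIONS_PER_ROOT;
		printf("root %2lu covers sections [%4lu, %4lu)\n",
		       root_nr, first, first + SECTIONS_PER_ROOT);
	}
	return 0;
}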