Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  11
-rw-r--r--  mm/slab.c        19
-rw-r--r--  mm/sparse.c       2
3 files changed, 23 insertions, 9 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047e..813b4ec1298a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -2566,9 +2567,11 @@ void setup_per_zone_pages_min(void)
 	}
 
 	for_each_zone(zone) {
-		unsigned long tmp;
+		u64 tmp;
+
 		spin_lock_irqsave(&zone->lru_lock, flags);
-		tmp = (pages_min * zone->present_pages) / lowmem_pages;
+		tmp = (u64)pages_min * zone->present_pages;
+		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2598,8 @@ void setup_per_zone_pages_min(void)
 			zone->pages_min = tmp;
 		}
 
-		zone->pages_low = zone->pages_min + tmp / 4;
-		zone->pages_high = zone->pages_min + tmp / 2;
+		zone->pages_low = zone->pages_min + (tmp >> 2);
+		zone->pages_high = zone->pages_min + (tmp >> 1);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 
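The page_alloc.c hunks fix a 32-bit overflow in setup_per_zone_pages_min(): pages_min * zone->present_pages multiplies two unsigned longs, and on 32-bit machines a large enough zone pushes the product past 2^32, so the computed watermarks wrap to garbage. The fix widens the intermediate to u64 and divides with do_div(), the kernel's in-place 64-by-32 division helper (a plain 64-bit '/' on a 32-bit build would pull in libgcc's __udivdi3, which the kernel does not link); tmp / 4 and tmp / 2 become shifts so the now-64-bit value needs no further division helpers. A minimal userspace sketch of the overflow, using illustrative values rather than real zone sizes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	/* Illustrative: ~64MB of pages_min, a 4GB zone, ~896MB of lowmem. */
    	uint32_t pages_min = 16384;
    	uint32_t present_pages = 1048576;
    	uint32_t lowmem_pages = 229376;

    	/* Old code: the product wraps in 32 bits (2^34 mod 2^32 == 0). */
    	uint32_t bad = (pages_min * present_pages) / lowmem_pages;

    	/* New code: widen first, then divide (do_div() in the kernel). */
    	uint64_t tmp = (uint64_t)pages_min * present_pages;
    	uint64_t good = tmp / lowmem_pages;

    	/* Prints 0 for the 32-bit path, 74898 for the 64-bit path. */
    	printf("32-bit: %u, 64-bit: %llu\n", bad, (unsigned long long)good);
    	return 0;
    }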
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e7581e..d31a06bfbea5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -700,6 +700,14 @@ static enum {
 	FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -2192,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
2192 check_irq_on(); 2200 check_irq_on();
2193 for_each_online_node(node) { 2201 for_each_online_node(node) {
2194 l3 = cachep->nodelists[node]; 2202 l3 = cachep->nodelists[node];
2195 if (l3) { 2203 if (l3 && l3->alien)
2204 drain_alien_cache(cachep, l3->alien);
2205 }
2206
2207 for_each_online_node(node) {
2208 l3 = cachep->nodelists[node];
2209 if (l3)
2196 drain_array(cachep, l3, l3->shared, 1, node); 2210 drain_array(cachep, l3, l3->shared, 1, node);
2197 if (l3->alien)
2198 drain_alien_cache(cachep, l3->alien);
2199 }
2200 } 2211 }
2201} 2212}
2202 2213
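Reconstructed from the hunk above, the loop body of drain_cpu_caches() now makes two passes over the online nodes instead of one (the function's preamble is unchanged and elided here): every node's alien caches are flushed before any node's shared array is drained.

    check_irq_on();
    /* Pass 1: flush each node's alien caches, which hold objects that
     * were freed on a node other than the object's home node. */
    for_each_online_node(node) {
    	l3 = cachep->nodelists[node];
    	if (l3 && l3->alien)
    		drain_alien_cache(cachep, l3->alien);
    }

    /* Pass 2: drain each node's shared array_cache back to the slab lists. */
    for_each_online_node(node) {
    	l3 = cachep->nodelists[node];
    	if (l3)
    		drain_array(cachep, l3, l3->shared, 1, node);
    }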
diff --git a/mm/sparse.c b/mm/sparse.c
index d7c32de99ee8..c5e89eb9ac8f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (system_state == SYSTEM_RUNNING)
+	if (slab_is_available())
 		section = kmalloc_node(array_size, GFP_KERNEL, nid);
 	else
 		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
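The sparse.c change is the first user of the new predicate. Testing system_state == SYSTEM_RUNNING was only a proxy for "is kmalloc usable yet": the slab allocator comes up well before system_state reaches SYSTEM_RUNNING, and other states (suspend, for instance) also fail the test, so the old check could steer an allocation to the wrong allocator. A hedged sketch of the general pattern slab_is_available() enables; my_early_alloc() is a hypothetical helper, not part of this patch:

    /* Hypothetical helper showing the boot-time allocation pattern. */
    static void *my_early_alloc(int nid, unsigned long size)
    {
    	/* Slab fully initialized (g_cpucache_up == FULL)? Use kmalloc. */
    	if (slab_is_available())
    		return kmalloc_node(size, GFP_KERNEL, nid);

    	/* Too early for slab: fall back to the boot memory allocator. */
    	return alloc_bootmem_node(NODE_DATA(nid), size);
    }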