Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c |  8
-rw-r--r--  mm/page_alloc.c     | 30
-rw-r--r--  mm/shmem.c          |  2
-rw-r--r--  mm/slab.c           | 46
-rw-r--r--  mm/sparse.c         |  9
-rw-r--r--  mm/vmscan.c         |  2
6 files changed, 59 insertions, 38 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1ae2b2cc3a54..70df5c0d957e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -91,8 +91,8 @@ static void grow_zone_span(struct zone *zone,
 	if (start_pfn < zone->zone_start_pfn)
 		zone->zone_start_pfn = start_pfn;
 
-	if (end_pfn > old_zone_end_pfn)
-		zone->spanned_pages = end_pfn - zone->zone_start_pfn;
+	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+				zone->zone_start_pfn;
 
 	zone_span_writeunlock(zone);
 }
@@ -106,8 +106,8 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
 	if (start_pfn < pgdat->node_start_pfn)
 		pgdat->node_start_pfn = start_pfn;
 
-	if (end_pfn > old_pgdat_end_pfn)
-		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
+	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+					pgdat->node_start_pfn;
 }
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047e..253a450c400d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -950,7 +951,7 @@ restart:
 		goto got_pg;
 
 	do {
-		if (cpuset_zone_allowed(*z, gfp_mask))
+		if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
 			wakeup_kswapd(*z, order);
 	} while (*(++z));
 
@@ -969,7 +970,8 @@ restart:
 		alloc_flags |= ALLOC_HARDER;
 	if (gfp_mask & __GFP_HIGH)
 		alloc_flags |= ALLOC_HIGH;
-	alloc_flags |= ALLOC_CPUSET;
+	if (wait)
+		alloc_flags |= ALLOC_CPUSET;
 
 	/*
 	 * Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -2123,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	/* ia64 gets its own node_mem_map, before this, without bootmem */
 	if (!pgdat->node_mem_map) {
-		unsigned long size;
+		unsigned long size, start, end;
 		struct page *map;
 
-		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+		/*
+		 * The zone's endpoints aren't required to be MAX_ORDER
+		 * aligned but the node_mem_map endpoints must be in order
+		 * for the buddy allocator to function correctly.
+		 */
+		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+		end = ALIGN(end, MAX_ORDER_NR_PAGES);
+		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
 		if (!map)
 			map = alloc_bootmem_node(pgdat, size);
-		pgdat->node_mem_map = map;
+		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
 #ifdef CONFIG_FLATMEM
 	/*
@@ -2566,9 +2576,11 @@ void setup_per_zone_pages_min(void)
 	}
 
 	for_each_zone(zone) {
-		unsigned long tmp;
+		u64 tmp;
+
 		spin_lock_irqsave(&zone->lru_lock, flags);
-		tmp = (pages_min * zone->present_pages) / lowmem_pages;
+		tmp = (u64)pages_min * zone->present_pages;
+		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2607,8 @@ void setup_per_zone_pages_min(void)
 			zone->pages_min = tmp;
 		}
 
-		zone->pages_low = zone->pages_min + tmp / 4;
-		zone->pages_high = zone->pages_min + tmp / 2;
+		zone->pages_low = zone->pages_min + (tmp >> 2);
+		zone->pages_high = zone->pages_min + (tmp >> 1);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 4c5e68e4e9ae..1e43c8a865ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
 	if (!simple_empty(dentry))
 		return -ENOTEMPTY;
 
+	dentry->d_inode->i_nlink--;
 	dir->i_nlink--;
 	return shmem_unlink(dir, dentry);
 }
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
 	sb->s_magic = TMPFS_MAGIC;
 	sb->s_op = &shmem_ops;
+	sb->s_time_gran = 1;
 
 	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
 	if (!inode)
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e7581e..f1b644eb39d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
@@ -700,6 +695,14 @@ static enum {
 	FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -1348,12 +1351,6 @@ void __init kmem_cache_init(void)
 					NULL, NULL);
 		}
 
-		/* Inc off-slab bufctl limit until the ceiling is hit. */
-		if (!(OFF_SLAB(sizes->cs_cachep))) {
-			offslab_limit = sizes->cs_size - sizeof(struct slab);
-			offslab_limit /= sizeof(kmem_bufctl_t);
-		}
-
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -1772,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
+	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
 
@@ -1783,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
-		/* More than offslab_limit objects will cause problems */
-		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-			break;
+		if (flags & CFLGS_OFF_SLAB) {
+			/*
+			 * Max number of objs-per-slab for caches which
+			 * use off-slab slabs. Needed to avoid a possible
+			 * looping condition in cache_grow().
+			 */
+			offslab_limit = size - sizeof(struct slab);
+			offslab_limit /= sizeof(kmem_bufctl_t);
+
+			if (num > offslab_limit)
+				break;
+		}
 
 		/* Found something acceptable - save it away */
 		cachep->num = num;
@@ -2192,11 +2199,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
 			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
 	}
 }
 
2202 2212
diff --git a/mm/sparse.c b/mm/sparse.c
index d7c32de99ee8..100040c0dfb6 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (system_state == SYSTEM_RUNNING)
+	if (slab_is_available())
 		section = kmalloc_node(array_size, GFP_KERNEL, nid);
 	else
 		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
@@ -87,11 +87,8 @@ int __section_nr(struct mem_section* ms)
 	unsigned long root_nr;
 	struct mem_section* root;
 
-	for (root_nr = 0;
-	     root_nr < NR_MEM_SECTIONS;
-	     root_nr += SECTIONS_PER_ROOT) {
-		root = __nr_to_section(root_nr);
-
+	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
+		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
 		if (!root)
 			continue;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4649a63a8cb6..440a733fe2e9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
 loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
-	sc.may_writepage = !laptop_mode,
+	sc.may_writepage = !laptop_mode;
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);