Diffstat (limited to 'mm')
-rw-r--r--  mm/page_cgroup.c  29
-rw-r--r--  mm/slab.c         41
-rw-r--r--  mm/slub.c         16
-rw-r--r--  mm/vmscan.c        4
4 files changed, 58 insertions, 32 deletions
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd4a909a1d..11a8a10a390 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
 	int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-				table_size,
-				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
+		VM_BUG_ON(!slab_is_available());
+		base = kmalloc_node(table_size,
+				GFP_KERNEL | __GFP_NOWARN, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
 	} else {
 		/*
 		 * We don't have to allocate page_cgroup again, but
diff --git a/mm/slab.c b/mm/slab.c
index f46b65d124e..18e3164de09 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,6 +304,12 @@ struct kmem_list3 {
 };
 
 /*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -753,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -761,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1625,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
 
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -2102,7 +2117,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
 				kmalloc_node(sizeof(struct kmem_list3),
-						GFP_KERNEL, node);
+						gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
 		}
@@ -3354,6 +3369,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3434,6 +3451,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
diff --git a/mm/slub.c b/mm/slub.c
index 3964d3ce4c1..30354bfeb43 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -178,6 +178,12 @@ static enum {
 	SYSFS		/* Sysfs up */
 } slab_state = DOWN;
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1595,6 +1601,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	gfpflags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
@@ -3104,6 +3112,14 @@ void __init kmem_cache_init(void)
 		nr_cpu_ids, nr_node_ids);
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 /*
  * Find a mergeable slab cache
  */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d254306562c..95c08a8cc2b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2056,7 +2056,7 @@ unsigned long global_lru_pages(void)
 		+ global_page_state(NR_INACTIVE_FILE);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
 /*
  * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
  * from LRU lists system-wide, for given pass and priority.
@@ -2196,7 +2196,7 @@ out:
 
 	return sc.nr_reclaimed;
 }
-#endif
+#endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness. So if the last cpu in a node goes
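
The mm/slab.c and mm/slub.c hunks above share one mechanism: until interrupts are enabled, every allocation's GFP flags are clamped by slab_gfp_mask so a GFP_KERNEL caller cannot sleep during early boot, and kmem_cache_init_late() lifts the restriction afterwards. Below is a minimal standalone sketch of that idea. SLAB_GFP_BOOT_MASK is defined outside this diff; the sketch assumes it clears the flags that may sleep or start I/O, and the bit values used here are illustrative, not the kernel's actual definitions.

/* Illustrative flag bits -- not the kernel's real values. */
#include <assert.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT		((gfp_t)0x10u)	/* allocation may sleep */
#define __GFP_IO		((gfp_t)0x40u)	/* may start physical I/O */
#define __GFP_FS		((gfp_t)0x80u)	/* may call into the filesystem */
#define __GFP_BITS_MASK		((gfp_t)0xffffffu)
#define SLAB_GFP_BOOT_MASK	(__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))
#define GFP_KERNEL		(__GFP_WAIT | __GFP_IO | __GFP_FS)

/* Boot starts with the restrictive mask, as in the hunks above. */
static gfp_t slab_gfp_mask = SLAB_GFP_BOOT_MASK;

/* Every allocation path clamps its flags first, so nothing can sleep
 * (and thereby re-enable interrupts) before boot is far enough along. */
static gfp_t clamp_gfp(gfp_t flags)
{
	return flags & slab_gfp_mask;
}

/* Stand-in for kmem_cache_init_late(): once interrupts are enabled,
 * all GFP flags pass through unchanged. */
static void init_late_sketch(void)
{
	slab_gfp_mask = __GFP_BITS_MASK;
}

int main(void)
{
	assert(!(clamp_gfp(GFP_KERNEL) & __GFP_WAIT));	/* early boot: forced atomic */
	init_late_sketch();
	assert(clamp_gfp(GFP_KERNEL) & __GFP_WAIT);	/* late boot: full GFP_KERNEL */
	return 0;
}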