author     Vegard Nossum <vegard.nossum@gmail.com>    2009-06-15 09:50:49 -0400
committer  Vegard Nossum <vegard.nossum@gmail.com>    2009-06-15 09:50:49 -0400
commit     722f2a6c87f34ee0fd0130a8cf45f81e0705594a
tree       50b054df34d2731eb0ba0cf1a6c27e43e7eed428 /mm
parent     7a0aeb14e18ad59394bd9bbc6e57fb345819e748
parent     45e3e1935e2857c54783291107d33323b3ef33c8
Merge commit 'linus/master' into HEAD
Conflicts:
MAINTAINERS
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/maccess.c      |  2
-rw-r--r--  mm/page_cgroup.c  | 29
-rw-r--r--  mm/slab.c         | 41
-rw-r--r--  mm/slub.c         | 16
-rw-r--r--  mm/vmscan.c       |  4
5 files changed, 59 insertions, 33 deletions
diff --git a/mm/maccess.c b/mm/maccess.c
index ac40796cfb15..9073695ff25f 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
  * Safely write to address @dst from the buffer at @src. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long probe_kernel_write(void *dst, void *src, size_t size)
+long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd4a909a1de..11a8a10a3909 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
 	int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
+		VM_BUG_ON(!slab_is_available());
 		base = kmalloc_node(table_size,
 				GFP_KERNEL | __GFP_NOWARN, nid);
 		if (!base)
 			base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-				table_size,
-				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
 	} else {
 		/*
 		 * We don't have to allocate page_cgroup again, but
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -305,6 +305,12 @@ struct kmem_list3 {
 };
 
 /*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
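The hunk above adds a boot-time GFP mask: every allocation's flags are ANDed with slab_gfp_mask, which stays restricted until kmem_cache_init_late() widens it to __GFP_BITS_MASK, so an early GFP_KERNEL-style request cannot sleep and re-enable interrupts. A standalone sketch of that masking pattern follows; all DEMO_* names and flag values are invented for illustration and are not the kernel's gfp.h definitions.

#include <stdio.h>

/* Illustrative flag values only -- not the kernel's gfp.h definitions. */
#define DEMO_GFP_WAIT       0x10u  /* caller may sleep */
#define DEMO_GFP_IO         0x40u
#define DEMO_GFP_FS         0x80u
#define DEMO_GFP_KERNEL     (DEMO_GFP_WAIT | DEMO_GFP_IO | DEMO_GFP_FS)
#define DEMO_GFP_BITS_MASK  0xffu

/* Boot-time mask: strip the bits that could lead to sleeping. */
static unsigned int demo_slab_gfp_mask =
	DEMO_GFP_BITS_MASK & ~(DEMO_GFP_WAIT | DEMO_GFP_IO | DEMO_GFP_FS);

static unsigned int demo_alloc_flags(unsigned int flags)
{
	return flags & demo_slab_gfp_mask;  /* mirrors "flags &= slab_gfp_mask" */
}

int main(void)
{
	/* Early boot: a GFP_KERNEL-style request is silently downgraded. */
	printf("early boot:      %#x -> %#x\n",
	       DEMO_GFP_KERNEL, demo_alloc_flags(DEMO_GFP_KERNEL));

	/* The equivalent of kmem_cache_init_late() once interrupts are on. */
	demo_slab_gfp_mask = DEMO_GFP_BITS_MASK;
	printf("after late init: %#x -> %#x\n",
	       DEMO_GFP_KERNEL, demo_alloc_flags(DEMO_GFP_KERNEL));
	return 0;
}

Run as an ordinary userspace program, the first line shows the sleep/IO/FS bits stripped during "early boot"; after the mask is widened the same request passes through unchanged.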
@@ -673,6 +679,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -681,7 +688,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
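With EARLY in place, slab_is_available() reports true as soon as kmem_cache_init() reaches g_cpucache_up = EARLY (see the kmem_cache_init() hunk further down), not only after the head arrays are resized late in boot. A minimal compilable sketch of just this state-machine behaviour, separate from the kernel sources:

#include <assert.h>

/* Bootstrap states, mirroring the g_cpucache_up enum in the hunk above. */
enum cpucache_up { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL };

static enum cpucache_up g_cpucache_up = NONE;

static int slab_is_available(void)
{
	return g_cpucache_up >= EARLY;	/* previously: == FULL */
}

int main(void)
{
	assert(!slab_is_available());	/* before kmem_cache_init() finishes */

	g_cpucache_up = EARLY;		/* end of kmem_cache_init() */
	assert(slab_is_available());	/* kmalloc users may now allocate */

	g_cpucache_up = FULL;		/* end of kmem_cache_init_late() */
	assert(slab_is_available());
	return 0;
}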
@@ -1545,19 +1552,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
 
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -2034,7 +2049,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
 				kmalloc_node(sizeof(struct kmem_list3),
-						GFP_KERNEL, node);
+						gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
 		}
@@ -3286,6 +3301,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3369,6 +3386,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -179,6 +179,12 @@ static enum {
 	SYSFS		/* Sysfs up */
 } slab_state = DOWN;
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1618,6 +1624,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	gfpflags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
@@ -3132,6 +3140,14 @@ void __init kmem_cache_init(void)
 		nr_cpu_ids, nr_node_ids);
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 /*
  * Find a mergeable slab cache
  */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d254306562cd..95c08a8cc2ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2056,7 +2056,7 @@ unsigned long global_lru_pages(void)
 		+ global_page_state(NR_INACTIVE_FILE);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
 /*
  * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
  * from LRU lists system-wide, for given pass and priority.
@@ -2196,7 +2196,7 @@ out:
 
 	return sc.nr_reclaimed;
 }
-#endif
+#endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness. So if the last cpu in a node goes