path: root/mm
author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-11-25 17:23:57 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-11-25 17:23:57 -0500
commit		2a859ab07b6ab66f4134c4fffc341398bd3d328c (patch)
tree		c5e7eaf3bffbc18feb326940e39794328d98dc07 /mm
parent		cedddd812a79a4fda3885a15711aee3de78c4a24 (diff)
parent		e716e014384688d1a50d1aa5213ee74748c6d4e0 (diff)
Merge branch 'merge' into next
Merge my own merge branch to get various fixes from there and upstream, especially the hvc console tty refcounting fixes, without which testing is quite a bit harder...
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c		|  2
-rw-r--r--	mm/fremap.c		|  2
-rw-r--r--	mm/memblock.c		| 24
-rw-r--r--	mm/mmu_notifier.c	| 26
-rw-r--r--	mm/page_alloc.c		|  6
-rw-r--r--	mm/rmap.c		| 20
-rw-r--r--	mm/slob.c		|  6
7 files changed, 60 insertions(+), 26 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 2c4ce17651d8..9eef55838fca 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -346,7 +346,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	 * pages requested were isolated. If there were any failures, 0 is
 	 * returned and CMA will fail.
 	 */
-	if (strict && nr_strict_required != total_isolated)
+	if (strict && nr_strict_required > total_isolated)
 		total_isolated = 0;
 
 	if (locked)
diff --git a/mm/fremap.c b/mm/fremap.c
index 3899a86851ce..a0aaf0e56800 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -169,7 +169,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
 		goto out;
 
-	if (!vma->vm_ops->remap_pages)
+	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
 		goto out;
 
 	if (start < vma->vm_start || start + size > vma->vm_end)
diff --git a/mm/memblock.c b/mm/memblock.c
index 931eef145af5..625905523c2a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -930,6 +930,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
+void __init_memblock memblock_trim_memory(phys_addr_t align)
+{
+	int i;
+	phys_addr_t start, end, orig_start, orig_end;
+	struct memblock_type *mem = &memblock.memory;
+
+	for (i = 0; i < mem->cnt; i++) {
+		orig_start = mem->regions[i].base;
+		orig_end = mem->regions[i].base + mem->regions[i].size;
+		start = round_up(orig_start, align);
+		end = round_down(orig_end, align);
+
+		if (start == orig_start && end == orig_end)
+			continue;
+
+		if (start < end) {
+			mem->regions[i].base = start;
+			mem->regions[i].size = end - start;
+		} else {
+			memblock_remove_region(mem, i);
+			i--;
+		}
+	}
+}
 
 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
 {
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 479a1e751a73..8a5ac8c686b0 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
 	/*
 	 * Verify that mmu_notifier_init() already run and the global srcu is
 	 * initialized.
 	 */
 	BUG_ON(!srcu.per_cpu_ref);
 
+	ret = -ENOMEM;
+	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+	if (unlikely(!mmu_notifier_mm))
+		goto out;
+
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out;
+		goto out_clean;
 
 	if (!mm_has_notifiers(mm)) {
-		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
-					GFP_KERNEL);
-		if (unlikely(!mmu_notifier_mm)) {
-			ret = -ENOMEM;
-			goto out_of_mem;
-		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
 
 		mm->mmu_notifier_mm = mmu_notifier_mm;
+		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
 
@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
-out_of_mem:
 	mm_drop_all_locks(mm);
-out:
+out_clean:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
-
+	kfree(mmu_notifier_mm);
+out:
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bb90971182bd..5b74de6702e0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
 	int i;
 
 	for_each_online_node(i)
-		if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
 			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+		else
 			zone_reclaim_mode = 1;
-		}
 }
 
 #else	/* CONFIG_NUMA */
@@ -5825,7 +5825,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	ret = start_isolate_page_range(pfn_max_align_down(start),
 				       pfn_max_align_up(end), migratetype);
 	if (ret)
-		goto done;
+		return ret;
 
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret)
diff --git a/mm/rmap.c b/mm/rmap.c
index 7df7984d476c..2ee1ef0f317b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 
@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
 
 	if (page_mapped(page)) {
 		struct address_space *mapping = page_mapping(page);
-		if (mapping) {
+		if (mapping)
 			ret = page_mkclean_file(mapping, page);
-			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
-				ret = 1;
-		}
 	}
 
 	return ret;
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	bool anon = PageAnon(page);
 	bool locked;
 	unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
 	 * this if the page is anon, so about to be freed; but perhaps
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
+	 *
+	 * And we can skip it on file pages, so long as the filesystem
+	 * participates in dirty tracking; but need to catch shm and tmpfs
+	 * and ramfs pages which have been modified since creation by read
+	 * fault.
+	 *
+	 * Note that mapping must be decided above, before decrementing
+	 * mapcount (which luckily provides a barrier): once page is unmapped,
+	 * it could be truncated and page->mapping reset to NULL at any moment.
+	 * Note also that we are relying on page_mapping(page) to set mapping
+	 * to &swapper_space when PageSwapCache(page).
 	 */
-	if ((!anon || PageSwapCache(page)) &&
+	if (mapping && !mapping_cap_account_dirty(mapping) &&
 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
 	/*
diff --git a/mm/slob.c b/mm/slob.c
index a08e4681fd0d..1e921c5e9576 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -429,7 +429,7 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
-	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	void *ret;
 
 	gfp &= gfp_allowed_mask;
@@ -502,7 +502,7 @@ void kfree(const void *block)
 
 	sp = virt_to_page(block);
 	if (PageSlab(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else
@@ -521,7 +521,7 @@ size_t ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (PageSlab(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
 	} else