author    Thomas Gleixner <tglx@linutronix.de>    2012-11-21 14:31:52 -0500
committer Thomas Gleixner <tglx@linutronix.de>    2012-11-21 14:31:52 -0500
commit    9c3f9e281697d02889c3b08922f3b30be75f56c2 (patch)
tree      e9f3d68e0019a47b982e2b8644f70eb66a3eff3c /mm
parent    b8f61116c1ce342804a0897b0a80eb4df5f19453 (diff)
parent    d6ad418763888f617ac5b4849823e4cd670df1dd (diff)
Merge branch 'fortglx/3.8/time' of git://git.linaro.org/people/jstultz/linux into timers/core
Fix trivial conflicts in:
	kernel/time/tick-sched.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memblock.c		24
-rw-r--r--	mm/mmu_notifier.c	26
-rw-r--r--	mm/page_alloc.c		6
-rw-r--r--	mm/rmap.c		20
-rw-r--r--	mm/vmscan.c		2
5 files changed, 57 insertions(+), 21 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 931eef145af5..625905523c2a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -930,6 +930,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
+void __init_memblock memblock_trim_memory(phys_addr_t align)
+{
+	int i;
+	phys_addr_t start, end, orig_start, orig_end;
+	struct memblock_type *mem = &memblock.memory;
+
+	for (i = 0; i < mem->cnt; i++) {
+		orig_start = mem->regions[i].base;
+		orig_end = mem->regions[i].base + mem->regions[i].size;
+		start = round_up(orig_start, align);
+		end = round_down(orig_end, align);
+
+		if (start == orig_start && end == orig_end)
+			continue;
+
+		if (start < end) {
+			mem->regions[i].base = start;
+			mem->regions[i].size = end - start;
+		} else {
+			memblock_remove_region(mem, i);
+			i--;
+		}
+	}
+}
+
 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
 {
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 479a1e751a73..8a5ac8c686b0 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
 	/*
 	 * Verify that mmu_notifier_init() already run and the global srcu is
 	 * initialized.
 	 */
 	BUG_ON(!srcu.per_cpu_ref);
 
+	ret = -ENOMEM;
+	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+	if (unlikely(!mmu_notifier_mm))
+		goto out;
+
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out;
+		goto out_clean;
 
 	if (!mm_has_notifiers(mm)) {
-		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
-					  GFP_KERNEL);
-		if (unlikely(!mmu_notifier_mm)) {
-			ret = -ENOMEM;
-			goto out_of_mem;
-		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
 
 		mm->mmu_notifier_mm = mmu_notifier_mm;
+		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
 
@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
-out_of_mem:
 	mm_drop_all_locks(mm);
-out:
+out_clean:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
-
+	kfree(mmu_notifier_mm);
+out:
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
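Note: the two hunks above move the GFP_KERNEL allocation in front of mm_take_all_locks(), so the notifier registration no longer sleeps in the allocator while holding every rmap lock in the mm. The shape of the pattern, reduced to a sketch (all names below are illustrative, not taken from the kernel):

	/* Allocate optimistically, then take locks, then publish or discard. */
	new = kmalloc(sizeof(*new), GFP_KERNEL);	/* may sleep: no locks held */
	if (!new)
		return -ENOMEM;
	lock_everything();
	if (!already_installed) {
		install(new);
		new = NULL;			/* ownership handed over */
	}
	unlock_everything();
	kfree(new);				/* kfree(NULL) is a no-op */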
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bb90971182bd..5b74de6702e0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
 	int i;
 
 	for_each_online_node(i)
-		if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
 			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+		else
 			zone_reclaim_mode = 1;
-		}
 }
 
 #else /* CONFIG_NUMA */
@@ -5825,7 +5825,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	ret = start_isolate_page_range(pfn_max_align_down(start),
 				       pfn_max_align_up(end), migratetype);
 	if (ret)
-		goto done;
+		return ret;
 
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret)
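Note: the `done:` label in alloc_contig_range() runs undo_isolate_page_range(); returning directly on a failed start_isolate_page_range() presumably reflects that the failed call already unwinds its own partial work, so the caller has nothing left to undo and must not touch pageblocks it never isolated. The general shape, as an illustrative sketch (names invented):

	static int example_isolate_then_migrate(void)
	{
		int ret = example_setup();	/* rolls back its own partial work */

		if (ret)
			return ret;		/* nothing here to undo */

		ret = example_work();
		example_teardown();		/* pairs with a successful setup */
		return ret;
	}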
diff --git a/mm/rmap.c b/mm/rmap.c
index 7df7984d476c..2ee1ef0f317b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 
@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
 
 	if (page_mapped(page)) {
 		struct address_space *mapping = page_mapping(page);
-		if (mapping) {
+		if (mapping)
 			ret = page_mkclean_file(mapping, page);
-			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
-				ret = 1;
-		}
 	}
 
 	return ret;
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	bool anon = PageAnon(page);
 	bool locked;
 	unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
 	 * this if the page is anon, so about to be freed; but perhaps
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
+	 *
+	 * And we can skip it on file pages, so long as the filesystem
+	 * participates in dirty tracking; but need to catch shm and tmpfs
+	 * and ramfs pages which have been modified since creation by read
+	 * fault.
+	 *
+	 * Note that mapping must be decided above, before decrementing
+	 * mapcount (which luckily provides a barrier): once page is unmapped,
+	 * it could be truncated and page->mapping reset to NULL at any moment.
+	 * Note also that we are relying on page_mapping(page) to set mapping
+	 * to &swapper_space when PageSwapCache(page).
 	 */
-	if ((!anon || PageSwapCache(page)) &&
+	if (mapping && !mapping_cap_account_dirty(mapping) &&
 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
 	/*
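Note: the rmap.c hunks are one logical change aimed at s390, where the hardware dirty bit lives in the per-page storage key rather than in the pte. page_mkclean() stops transferring that bit, and page_remove_rmap() now transfers it only for mappings that do not do their own dirty accounting (tmpfs, shm, ramfs), so fully dirty-tracked filesystems no longer see pages go dirty without buffers. The new predicate reduces, in this era's tree, roughly to a backing_dev_info capability test, which is what the new backing-dev.h include is for (sketch of the assumed definition, not copied from the tree):

	/* Roughly what mapping_cap_account_dirty() expands to (assumed): */
	static inline bool example_cap_account_dirty(struct address_space *mapping)
	{
		/* Filesystems such as tmpfs and ramfs set BDI_CAP_NO_ACCT_DIRTY
		 * to opt out of dirty accounting and writeback. */
		return !(mapping->backing_dev_info->capabilities &
			 BDI_CAP_NO_ACCT_DIRTY);
	}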
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2624edcfb420..8b055e9379bc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3017,6 +3017,8 @@ static int kswapd(void *p)
 						&balanced_classzone_idx);
 		}
 	}
+
+	current->reclaim_state = NULL;
 	return 0;
 }
 
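Note: kswapd points current->reclaim_state at a struct that lives on its own stack; if the thread ever exits (for instance when its node goes away) without clearing the pointer, code run while the task tears down could write through a dangling stack address. The pattern, as a simplified sketch (helper names invented):

	static int example_reclaim_thread(void *p)
	{
		struct reclaim_state reclaim_state = { .reclaimed_slab = 0 };

		current->reclaim_state = &reclaim_state;	/* points into our stack */
		while (!kthread_should_stop())
			do_reclaim_work();
		/* Clear before the frame dies: the exiting task may still free
		 * pages, and slab freeing accounts into *current->reclaim_state. */
		current->reclaim_state = NULL;
		return 0;
	}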