Diffstat (limited to 'mm')
 -rw-r--r--  mm/Makefile            2
 -rw-r--r--  mm/debug.c             1
 -rw-r--r--  mm/filemap.c         114
 -rw-r--r--  mm/internal.h         10
 -rw-r--r--  mm/memory.c           19
 -rw-r--r--  mm/migrate.c         127
 -rw-r--r--  mm/page-writeback.c    3
 -rw-r--r--  mm/page_alloc.c        2
 -rw-r--r--  mm/vmscan.c            7
 -rw-r--r--  mm/vmstat.c            4
10 files changed, 20 insertions, 269 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 98d28edd36a5..98c4eaeabdcb 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -56,7 +56,7 @@ obj-$(CONFIG_KASAN) += kasan/
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_MEMTEST) += memtest.o
-obj-$(CONFIG_MIGRATION) += migrate.o replication.o
+obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
 obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
diff --git a/mm/debug.c b/mm/debug.c
index dbc3ea81dde7..3eb3ac2fcee7 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -36,7 +36,6 @@ static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_reclaim,		"reclaim"	},
 	{1UL << PG_swapbacked,		"swapbacked"	},
 	{1UL << PG_unevictable,		"unevictable"	},
-	{1UL << PG_replicated,		"replicated"	},
 #ifdef CONFIG_MMU
 	{1UL << PG_mlocked,		"mlocked"	},
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 93853e337f07..6bf5e42d560a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -36,9 +36,6 @@
 #include <linux/rmap.h>
 #include "internal.h"
 
-#include <litmus/litmus.h>
-#include <litmus/mc2_common.h>
-
 #define CREATE_TRACE_POINTS
 #include <trace/events/filemap.h>
 
@@ -976,18 +973,6 @@ repeat:
 	page = radix_tree_deref_slot(pagep);
 	if (unlikely(!page))
 		goto out;
-	if (is_pcache_desc(page)) {
-		struct pcache_desc *pcd;
-printk(KERN_INFO "PCACHE_DESC\n");
-		pcd = ptr_to_pcache_desc(page);
-		page = pcd->master;
-		page_cache_get_speculative(page);
-
-		unreplicate_pcache(mapping, page->index, 0);
-
-		goto out;
-	}
-
 	if (radix_tree_exception(page)) {
 		if (radix_tree_deref_retry(page))
 			goto repeat;
@@ -1186,20 +1171,6 @@ repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
 			continue;
-
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-			printk(KERN_INFO "PCACHE_DESC\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page))
 				goto restart;
@@ -1263,20 +1234,6 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-			printk(KERN_INFO "PCACHE_DESC\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1304,7 +1261,6 @@ repeat:
 			goto repeat;
 		}
 
-export:
 		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
@@ -1346,20 +1302,6 @@ repeat:
 		if (unlikely(!page))
 			break;
 
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-			printk(KERN_INFO "PCACHE_DESC\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1385,7 +1327,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-export:
+
 		/*
 		 * must check mapping and index after taking the ref.
 		 * otherwise we can get both false positives and false
@@ -1436,20 +1378,6 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-			printk(KERN_INFO "PCACHE_DESC BUG!!!!!!!!!!\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1481,7 +1409,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-export:
+
 		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
@@ -1557,11 +1485,7 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
 	cond_resched();
 find_page:
-	if (is_realtime(current))
-		page = find_get_page_readonly(mapping, index);
-	else
-		page = find_get_page(mapping, index);
-
+	page = find_get_page(mapping, index);
 	if (!page) {
 		page_cache_sync_readahead(mapping,
 				ra, filp,
@@ -1713,8 +1637,7 @@ readpage:
 			unlock_page(page);
 		}
 
-		page_cache_release(page);
-		goto find_page;
+		goto page_ok;
 
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
@@ -1958,11 +1881,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/*
 	 * Do we have something in the page cache already?
 	 */
-	if ((vmf->flags & FAULT_FLAG_WRITE) || !is_realtime(current))
-		page = find_get_page(mapping, offset);
-	else
-		page = find_get_page_readonly(mapping, offset);
-
+	page = find_get_page(mapping, offset);
 	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
 		/*
 		 * We found the page, so try async readahead before
@@ -1976,10 +1895,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
 retry_find:
-		if ((vmf->flags & FAULT_FLAG_WRITE) || !is_realtime(current))
-			page = find_get_page(mapping, offset);
-		else
-			page = find_get_page_readonly(mapping, offset);
+		page = find_get_page(mapping, offset);
 		if (!page)
 			goto no_cached_page;
 	}
@@ -2087,22 +2003,6 @@ repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
 			goto next;
-
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-printk(KERN_INFO "PCACHE_DESC FILE_MAP_PAGES\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			if (!page_cache_get_speculative(page))
-				goto repeat;
-
-			//unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page))
 				break;
@@ -2118,7 +2018,7 @@ printk(KERN_INFO "PCACHE_DESC FILE_MAP_PAGES\n");
 			page_cache_release(page);
 			goto repeat;
 		}
-export:
+
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
 				PageHWPoison(page))
diff --git a/mm/internal.h b/mm/internal.h
index ccc349b59d00..a25e359a4039 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -433,14 +433,4 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 #define ALLOC_FAIR		0x100 /* fair zone allocation */
 
-extern int reclaim_replicated_page(struct address_space *mapping,
-		struct page *page);
-extern struct page *find_get_page_readonly(struct address_space *mapping,
-		unsigned long offset);
-extern int is_pcache_desc(void *ptr);
-extern struct pcache_desc *ptr_to_pcache_desc(void *ptr);
-extern void *pcache_desc_to_ptr(struct pcache_desc *pcd);
-extern void unreplicate_pcache(struct address_space *mapping, unsigned long offset, int locked);
-int page_write_fault_retry(struct page *page);
-
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/memory.c b/mm/memory.c
index 1fc358bec6d5..22e037e3364e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2231,24 +2231,15 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * read-only shared pages can get COWed by
 	 * get_user_pages(.write=1, .force=1).
 	 */
-//	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
-	{
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
 
 		pte_unmap_unlock(page_table, ptl);
-
-		if (page_write_fault_retry(old_page)) {
+		tmp = do_page_mkwrite(vma, old_page, address);
+		if (unlikely(!tmp || (tmp &
+			    (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
 			page_cache_release(old_page);
-			return 0;
-		}
-
-		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
-			tmp = do_page_mkwrite(vma, old_page, address);
-			if (unlikely(!tmp || (tmp &
-			    (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-				page_cache_release(old_page);
-				return tmp;
-			}
+			return tmp;
 		}
 		/*
 		 * Since we dropped the lock we need to revalidate
diff --git a/mm/migrate.c b/mm/migrate.c
index d25cc2c2736d..a2e9cad083d5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -405,7 +405,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count)
 {
-	int expected_count = 1 + extra_count;
 	int prev_count = page_count(page);
 	void **pslot;
 
@@ -415,38 +414,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
 
-	expected_count += 1 + page_has_private(page);
-
-	TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
-/*
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "1\n");
-		return -EAGAIN;
-	}
-
-	if (!page_freeze_refs(page, expected_count)) { // if page_count(page) == expected_count, then set page_count = 0
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "2\n");
-		return -EAGAIN;
-	}
-*/
-	/*
-	 * In the async migration case of moving a page with buffers, lock the
-	 * buffers using trylock before the mapping is moved. If the mapping
-	 * was moved, we later failed to lock the buffers and could not move
-	 * the mapping back due to an elevated page count, we would have to
-	 * block waiting on other references to be dropped.
-	 */
-/*	if (mode == MIGRATE_ASYNC && head &&
-		!buffer_migrate_lock_buffers(head, mode)) {
-		page_unfreeze_refs(page, expected_count);
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "3\n");
-		return -EAGAIN;
-	}
-*/
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
@@ -456,15 +423,11 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		set_page_private(newpage, page_private(page));
 	}
 
-	//radix_tree_replace_slot(pslot, newpage);
-	//radix_tree_replace_slot(pslot, page);
-
 	/*
 	 * Drop cache reference from old page by unfreezing
-	 * to one less reference.
+	 * to the previous reference.
 	 * We know this isn't the last reference.
 	 */
-	//page_unfreeze_refs(page, expected_count - 1);
 	page_unfreeze_refs(page, prev_count);
 
 	/*
@@ -702,7 +665,6 @@ void replicate_page_copy(struct page *newpage, struct page *page)
 	 */
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
-	TRACE_TASK(current, "replicate_page_copy done!\n");
 }
 
 /************************************************************
@@ -742,7 +704,6 @@ int replicate_page(struct address_space *mapping,
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
 	rc = replicate_page_move_mapping(mapping, newpage, page, NULL, mode, extra_count);
-	TRACE_TASK(current, "replicate_page_move_mapping returned %d\n", rc);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
@@ -975,19 +936,9 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
 		rc = migrate_page(mapping, newpage, page, mode);
 	}
 	else if (mapping->a_ops->migratepage) {
-		TRACE_TASK(current, "ops migration callback\n");
-		/*
-		 * Most pages have a mapping and most filesystems provide a
-		 * migratepage callback. Anonymous pages are part of swap
-		 * space which also has its own migratepage callback. This
-		 * is the most common path for page migration.
-		 */
-		//rc = mapping->a_ops->migratepage(mapping,
-		//				newpage, page, mode);
 		rc = replicate_page(mapping, newpage, page, mode, has_replica);
 	}
 	else {
-		TRACE_TASK(current, "fallback function\n");
 		rc = fallback_migrate_page(mapping, newpage, page, mode);
 	}
 
@@ -995,10 +946,8 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
 		newpage->mapping = NULL;
 	} else {
 		if (page_was_mapped) {
-			TRACE_TASK(current, "PAGE_WAS_MAPPED = 1\n");
 			remove_migration_ptes(page, newpage);
 		}
-		//page->mapping = NULL;
 	}
 
 	unlock_page(newpage);
@@ -1178,76 +1127,18 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 
 	if (PageWriteback(page)) {
 		/*
-		 * Only in the case of a full synchronous migration is it
-		 * necessary to wait for PageWriteback. In the async case,
-		 * the retry loop is too short and in the sync-light case,
-		 * the overhead of stalling is too much
+		 * The code of shared library cannot be written.
 		 */
 		BUG();
-		/*
-		if (mode != MIGRATE_SYNC) {
-			rc = -EBUSY;
-			goto out_unlock;
-		}
-		if (!force)
-			goto out_unlock;
-		wait_on_page_writeback(page);
-		*/
 	}
-	/*
-	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
-	 * we cannot notice that anon_vma is freed while we migrates a page.
-	 * This get_anon_vma() delays freeing anon_vma pointer until the end
-	 * of migration. File cache pages are no problem because of page_lock()
-	 * File Caches may use write_page() or lock_page() in migration, then,
-	 * just care Anon page here.
-	 */
+
 	if (PageAnon(page) && !PageKsm(page)) {
-		printk(KERN_INFO "ANON but not KSM\n");
+		/* The shared library pages must be backed by a file. */
 		BUG();
-		/*
-		 * Only page_lock_anon_vma_read() understands the subtleties of
-		 * getting a hold on an anon_vma from outside one of its mms.
-		 */
-/*
-		anon_vma = page_get_anon_vma(page);
-		if (anon_vma) {
-*/
-			/*
-			 * Anon page
-			 */
-/*
-		} else if (PageSwapCache(page)) {
-*/
-			/*
-			 * We cannot be sure that the anon_vma of an unmapped
-			 * swapcache page is safe to use because we don't
-			 * know in advance if the VMA that this page belonged
-			 * to still exists. If the VMA and others sharing the
-			 * data have been freed, then the anon_vma could
-			 * already be invalid.
-			 *
-			 * To avoid this possibility, swapcache pages get
-			 * migrated but are not remapped when migration
-			 * completes
-			 */
-/*		} else {
-			goto out_unlock;
-		}
-*/
 	}
 
 	if (unlikely(isolated_balloon_page(page))) {
 		BUG();
-		/*
-		 * A ballooned page does not need any special attention from
-		 * physical to virtual reverse mapping procedures.
-		 * Skip any attempt to unmap PTEs or to remap swap cache,
-		 * in order to avoid burning cycles at rmap level, and perform
-		 * the page migration right away (proteced by page lock).
-		 */
-		rc = balloon_page_migrate(newpage, page, mode);
-		goto out_unlock;
 	}
 
 	/*
@@ -1273,22 +1164,17 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 
 	/* Establish migration ptes or remove ptes */
 	if (page_mapped(page)) {
-		// ttu_ret = try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		struct rmap_walk_control rwc = {
 			.rmap_one = try_to_unmap_one_only,
 			.arg = (void *)(TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS),
 		};
-
 		ttu_ret = rmap_walk(page, &rwc);
 
 		page_was_mapped = 1;
-		TRACE_TASK(current, "Page %d unmapped from all PTEs\n", page_to_pfn(page));
 	}
 
 skip_unmap:
-	//if (!page_mapped(page)) {
 	if (ttu_ret == SWAP_SUCCESS) {
-		TRACE_TASK(current, "Call copy_to_new_page\n");
 		rc = copy_to_new_page(newpage, page, page_was_mapped, mode, has_replica);
 	} else if (ttu_ret == SWAP_AGAIN)
 		printk(KERN_ERR "rmap_walk returned SWAP_AGAIN\n");
@@ -1418,16 +1304,13 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 		newpage = get_new_page(page, private, &result);
 		if (!newpage)
 			return -ENOMEM;
-		//printk(KERN_ERR "Page %lx allocated\n", page_to_pfn(newpage));
 	} else {
 		newpage = lib_page->r_page[cpu];
 		has_replica = 1;
-		//printk(KERN_ERR "Page %lx found\n", page_to_pfn(newpage));
 	}
 
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
-		TRACE_TASK(current, "page %x _count == 1\n", page_to_pfn(page));
 		goto out;
 	}
 
@@ -1443,7 +1326,6 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 	}
 
 out:
-TRACE_TASK(current, "__unmap_and_copy returned %s\n", rc==MIGRATEPAGE_SUCCESS?"SUCCESS":"FAIL");
 	if (rc != -EAGAIN) {
 		/*
 		 * A page that has been migrated has all references
@@ -1457,7 +1339,6 @@ TRACE_TASK(current, "__unmap_and_copy returned %s\n", rc==MIGRATEPAGE_SUCCESS?"S
 		putback_lru_page(page);
 	}
 
-//TRACE_TASK(current, "old page freed\n");
 	/*
 	 * If migration was not successful and there's a freeing callback, use
 	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 161af608b7e2..7e39ffceb566 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2287,8 +2287,7 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	//if (mapping && mapping_cap_account_dirty(mapping)) {
-	if (mapping) {
+	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
 		 *
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3ffde2a09765..950c002bbb45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -662,7 +662,7 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page)))
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))
-		bad_reason = "non-NULL mapping free_check";
+		bad_reason = "non-NULL mapping";
 	if (unlikely(atomic_read(&page->_count) != 0))
 		bad_reason = "nonzero _count";
 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b9b6bef90169..5e8eadd71bac 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -582,7 +582,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-again:
 	spin_lock_irq(&mapping->tree_lock);
 	/*
 	 * The non racy check for a busy page.
@@ -641,11 +640,7 @@ again:
 		if (reclaimed && page_is_file_cache(page) &&
 		    !mapping_exiting(mapping))
 			shadow = workingset_eviction(mapping, page);
-		if (PageReplicated(page)) {
-			if (reclaim_replicated_page(mapping, page))
-				goto again;
-		} else
-			__delete_from_page_cache(page, shadow);
+		__delete_from_page_cache(page, shadow);
 		spin_unlock_irq(&mapping->tree_lock);
 
 		if (freepage != NULL)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6af8ea00cbef..4f5cd974e11a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -708,7 +708,6 @@ const char * const vmstat_text[] = {
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
-	"nr_repl_pages",
 	"nr_dirty",
 	"nr_writeback",
 	"nr_slab_reclaimable",
@@ -761,9 +760,6 @@ const char * const vmstat_text[] = {
 	"pgfault",
 	"pgmajfault",
 
-	"pgreplicated",
-	"pgreplicazap",
-
 	TEXTS_FOR_ZONES("pgrefill")
 	TEXTS_FOR_ZONES("pgsteal_kswapd")
 	TEXTS_FOR_ZONES("pgsteal_direct")