author     Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-29 15:05:48 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-29 15:05:48 -0400
commit     0e6a44a09800be09924707025646b3f3e3700306 (patch)
tree       cdfb12473efd82a6935e84b5225c17c54fcd4793
parent     a75060efea2b2ce6e5bf8ab540c123de42395755 (diff)

9/29/2016 find_get_page_readonly checks SCHED_LITMUS
Diffstat:
-rw-r--r--  include/linux/mm.h            |   2
-rw-r--r--  include/linux/mmzone.h        |   1
-rw-r--r--  include/linux/page-flags.h    |   6
-rw-r--r--  include/linux/vm_event_item.h |   1
-rw-r--r--  init/main.c                   |   1
-rw-r--r--  litmus/litmus.c               |   6
-rw-r--r--  mm/Makefile                   |   2
-rw-r--r--  mm/debug.c                    |   1
-rw-r--r--  mm/filemap.c                  | 119
-rw-r--r--  mm/internal.h                 |  10
-rw-r--r--  mm/memory.c                   |  19
-rw-r--r--  mm/page-writeback.c           |   3
-rw-r--r--  mm/vmscan.c                   |   7
-rw-r--r--  mm/vmstat.c                   |   4
14 files changed, 160 insertions(+), 22 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0755b9fd03a7..55df1f8bf4cb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2200,5 +2200,7 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+extern void replication_init(void);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54d74f6eb233..abc63c255d44 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -126,6 +126,7 @@ enum zone_stat_item {
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
+	NR_REPL_PAGES,
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
 	NR_SLAB_RECLAIMABLE,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..8b0d7723f3c9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -97,6 +97,7 @@ enum pageflags {
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 	PG_unevictable,		/* Page is "unevictable" */
+	PG_replicated,		/* Page is replicated pagecache */
 #ifdef CONFIG_MMU
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
@@ -289,6 +290,11 @@ PAGEFLAG_FALSE(HWPoison)
 #define __PG_HWPOISON 0
 #endif
 
+#define PageReplicated(page)	test_bit(PG_replicated, &(page)->flags)
+#define __SetPageReplicated(page)	do { BUG_ON(PageDirty(page) || PageWriteback(page)); __set_bit(PG_replicated, &(page)->flags); } while (0)
+#define SetPageReplicated(page)	do { BUG_ON(PageDirty(page) || PageWriteback(page)); set_bit(PG_replicated, &(page)->flags); } while (0)
+#define ClearPageReplicated(page)	clear_bit(PG_replicated, &(page)->flags)
+
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
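The four helpers above are hand-rolled rather than generated through the usual PAGEFLAG() machinery. A minimal sketch of how they would typically pair up with the NR_REPL_PAGES zone counter added above and the PGREPLICATED/PGREPLICAZAP events added below; mark_page_replica()/unmark_page_replica() are hypothetical names, since the real call sites live in mm/replication.c, which this diff does not show:

/* Illustrative sketch only -- not part of this patch. */
static inline void mark_page_replica(struct page *page)
{
	/* SetPageReplicated() BUG()s if the page is dirty or under writeback */
	SetPageReplicated(page);
	inc_zone_page_state(page, NR_REPL_PAGES);
	count_vm_event(PGREPLICATED);
}

static inline void unmark_page_replica(struct page *page)
{
	ClearPageReplicated(page);
	dec_zone_page_state(page, NR_REPL_PAGES);
	count_vm_event(PGREPLICAZAP);
}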
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 9246d32dc973..62820318d8ad 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -25,6 +25,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
 		PGFREE, PGACTIVATE, PGDEACTIVATE,
 		PGFAULT, PGMAJFAULT,
+		PGREPLICATED, PGREPLICAZAP,
 		FOR_ALL_ZONES(PGREFILL),
 		FOR_ALL_ZONES(PGSTEAL_KSWAPD),
 		FOR_ALL_ZONES(PGSTEAL_DIRECT),
diff --git a/init/main.c b/init/main.c
index 2a89545e0a5d..88917d93fbe4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -628,6 +628,7 @@ asmlinkage __visible void __init start_kernel(void)
 	kmemleak_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
+	replication_init();
 	if (late_time_init)
 		late_time_init();
 	sched_clock_init();
diff --git a/litmus/litmus.c b/litmus/litmus.c
index f88cd16ab86d..d31138c9b9a6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -571,6 +571,12 @@ asmlinkage long sys_set_page_color(int cpu)
 			put_page(old_page);
 			continue;
 		}
+
+		if (page_count(old_page) - page_mapcount(old_page) == 1) {
+			put_page(old_page);
+			continue;
+		}
+
 		TRACE_TASK(current, "addr: %08x, pfn: %ld, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
 		put_page(old_page);
 	}
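The added early-exit skips any page whose reference count exceeds its map count by exactly one, i.e. the reference this walk just took is the only one not accounted for by mappings. A hypothetical helper expressing the same test (not part of the patch):

/* Hypothetical helper, equivalent to the check added above. */
static inline bool only_walker_reference(struct page *page)
{
	return page_count(page) - page_mapcount(page) == 1;
}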
diff --git a/mm/Makefile b/mm/Makefile
index 98c4eaeabdcb..98d28edd36a5 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -56,7 +56,7 @@ obj-$(CONFIG_KASAN) += kasan/
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_MEMTEST) += memtest.o
-obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_MIGRATION) += migrate.o replication.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
 obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
diff --git a/mm/debug.c b/mm/debug.c
index 3eb3ac2fcee7..dbc3ea81dde7 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -36,6 +36,7 @@ static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_reclaim,	"reclaim"	},
 	{1UL << PG_swapbacked,	"swapbacked"	},
 	{1UL << PG_unevictable,	"unevictable"	},
+	{1UL << PG_replicated,	"replicated"	},
 #ifdef CONFIG_MMU
 	{1UL << PG_mlocked,	"mlocked"	},
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 8ea609718839..93853e337f07 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -973,13 +973,21 @@ repeat:
 	page = NULL;
 	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
 	if (pagep) {
-		void *pdesc;
-		pdesc = radix_tree_deref_slot(pagep);
-		if (pdesc)
-			page = (struct page*)pdesc;
-		//page = radix_tree_deref_slot(pagep);
+		page = radix_tree_deref_slot(pagep);
 		if (unlikely(!page))
 			goto out;
+		if (is_pcache_desc(page)) {
+			struct pcache_desc *pcd;
+printk(KERN_INFO "PCACHE_DESC\n");
+			pcd = ptr_to_pcache_desc(page);
+			page = pcd->master;
+			page_cache_get_speculative(page);
+
+			unreplicate_pcache(mapping, page->index, 0);
+
+			goto out;
+		}
+
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page))
 				goto repeat;
@@ -1178,6 +1186,20 @@ repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
 			continue;
+
+		if (is_pcache_desc(page)) {
+			struct pcache_desc *pcd;
+			printk(KERN_INFO "PCACHE_DESC\n");
+
+			pcd = ptr_to_pcache_desc(page);
+			page = pcd->master;
+			page_cache_get_speculative(page);
+
+			unreplicate_pcache(mapping, page->index, 0);
+
+			goto export;
+		}
+
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page))
 				goto restart;
@@ -1241,6 +1263,20 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
+		if (is_pcache_desc(page)) {
+			struct pcache_desc *pcd;
+
+			printk(KERN_INFO "PCACHE_DESC\n");
+
+			pcd = ptr_to_pcache_desc(page);
+			page = pcd->master;
+			page_cache_get_speculative(page);
+
+			unreplicate_pcache(mapping, page->index, 0);
+
+			goto export;
+		}
+
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1268,6 +1304,7 @@ repeat:
 			goto repeat;
 		}
 
+export:
 		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
@@ -1309,6 +1346,20 @@ repeat:
 		if (unlikely(!page))
 			break;
 
+		if (is_pcache_desc(page)) {
+			struct pcache_desc *pcd;
+
+			printk(KERN_INFO "PCACHE_DESC\n");
+
+			pcd = ptr_to_pcache_desc(page);
+			page = pcd->master;
+			page_cache_get_speculative(page);
+
+			unreplicate_pcache(mapping, page->index, 0);
+
+			goto export;
+		}
+
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1334,7 +1385,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-
+export:
 		/*
 		 * must check mapping and index after taking the ref.
 		 * otherwise we can get both false positives and false
@@ -1385,6 +1436,20 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
+		if (is_pcache_desc(page)) {
+			struct pcache_desc *pcd;
+
+			printk(KERN_INFO "PCACHE_DESC BUG!!!!!!!!!!\n");
+
+			pcd = ptr_to_pcache_desc(page);
+			page = pcd->master;
+			page_cache_get_speculative(page);
+
+			unreplicate_pcache(mapping, page->index, 0);
+
+			goto export;
+		}
+
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1416,7 +1481,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-
+export:
 		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
@@ -1492,7 +1557,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
 		cond_resched();
 find_page:
-		page = find_get_page(mapping, index);
+		if (is_realtime(current))
+			page = find_get_page_readonly(mapping, index);
+		else
+			page = find_get_page(mapping, index);
+
 		if (!page) {
 			page_cache_sync_readahead(mapping,
 					ra, filp,
@@ -1644,7 +1713,8 @@ readpage:
 			unlock_page(page);
 		}
 
-		goto page_ok;
+		page_cache_release(page);
+		goto find_page;
 
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
@@ -1888,9 +1958,11 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/*
 	 * Do we have something in the page cache already?
 	 */
-if (is_realtime(current))
-	printk("FILEMAP_FAULT %ld\n", vma->vm_start);
-	page = find_get_page(mapping, offset);
+	if ((vmf->flags & FAULT_FLAG_WRITE) || !is_realtime(current))
+		page = find_get_page(mapping, offset);
+	else
+		page = find_get_page_readonly(mapping, offset);
+
 	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
 		/*
 		 * We found the page, so try async readahead before
@@ -1904,7 +1976,10 @@ if (is_realtime(current))
 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
 retry_find:
-		page = find_get_page(mapping, offset);
+		if ((vmf->flags & FAULT_FLAG_WRITE) || !is_realtime(current))
+			page = find_get_page(mapping, offset);
+		else
+			page = find_get_page_readonly(mapping, offset);
 		if (!page)
 			goto no_cached_page;
 	}
@@ -2012,6 +2087,22 @@ repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
 			goto next;
+
+		if (is_pcache_desc(page)) {
+			struct pcache_desc *pcd;
+
+printk(KERN_INFO "PCACHE_DESC FILE_MAP_PAGES\n");
+
+			pcd = ptr_to_pcache_desc(page);
+			page = pcd->master;
+			if (!page_cache_get_speculative(page))
+				goto repeat;
+
+			//unreplicate_pcache(mapping, page->index, 0);
+
+			goto export;
+		}
+
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page))
 				break;
@@ -2027,7 +2118,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-
+export:
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
 				PageHWPoison(page))
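Every radix-tree walker above repeats the same descriptor check. A condensed sketch of that pattern as a hypothetical helper (not part of the patch; it uses only the interfaces this patch declares in mm/internal.h below, plus the ->master field the hunks above already dereference):

/*
 * Hypothetical helper: if the radix-tree slot holds a replication
 * descriptor instead of a struct page, resolve it to the master page,
 * pin it, and collapse the replicas so there is a single copy again.
 */
static struct page *resolve_pcache_slot(struct address_space *mapping, void *entry)
{
	struct pcache_desc *pcd;
	struct page *page;

	if (!is_pcache_desc(entry))
		return entry;		/* plain page or exceptional entry */

	pcd = ptr_to_pcache_desc(entry);
	page = pcd->master;
	if (!page_cache_get_speculative(page))
		return NULL;		/* caller should restart the walk */

	unreplicate_pcache(mapping, page->index, 0);
	return page;
}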
diff --git a/mm/internal.h b/mm/internal.h
index a25e359a4039..ccc349b59d00 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -433,4 +433,14 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
 #define ALLOC_FAIR 0x100 /* fair zone allocation */
 
+extern int reclaim_replicated_page(struct address_space *mapping,
+				struct page *page);
+extern struct page *find_get_page_readonly(struct address_space *mapping,
+				unsigned long offset);
+extern int is_pcache_desc(void *ptr);
+extern struct pcache_desc *ptr_to_pcache_desc(void *ptr);
+extern void *pcache_desc_to_ptr(struct pcache_desc *pcd);
+extern void unreplicate_pcache(struct address_space *mapping, unsigned long offset, int locked);
+int page_write_fault_retry(struct page *page);
+
 #endif /* __MM_INTERNAL_H */
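struct pcache_desc itself is defined by the replication code that this diff does not include; judging from the mm/filemap.c hunks above, the only field assumed here is the pointer to the master page:

/* Assumed minimal layout -- only ->master is dereferenced anywhere in
 * this diff; the per-replica bookkeeping kept by mm/replication.c is
 * not shown and is omitted here. */
struct pcache_desc {
	struct page *master;	/* canonical pagecache page for this offset */
};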
diff --git a/mm/memory.c b/mm/memory.c
index 22e037e3364e..1fc358bec6d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2231,15 +2231,24 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * read-only shared pages can get COWed by
 	 * get_user_pages(.write=1, .force=1).
 	 */
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+// if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+	{
 		int tmp;
 
 		pte_unmap_unlock(page_table, ptl);
-		tmp = do_page_mkwrite(vma, old_page, address);
-		if (unlikely(!tmp || (tmp &
-			     (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+
+		if (page_write_fault_retry(old_page)) {
 			page_cache_release(old_page);
-			return tmp;
+			return 0;
+		}
+
+		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+			tmp = do_page_mkwrite(vma, old_page, address);
+			if (unlikely(!tmp || (tmp &
+				     (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+				page_cache_release(old_page);
+				return tmp;
+			}
 		}
 		/*
 		 * Since we dropped the lock we need to revalidate
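page_write_fault_retry() is implemented outside this diff, so only its contract can be inferred from the call site above; the sketch below states that inference explicitly and should be read as an assumption, not as the function's actual definition.

/*
 * Assumed contract, inferred from wp_page_shared() above: return
 * non-zero when @page was part of a replicated pagecache entry that had
 * to be collapsed before the write could proceed.  The caller then
 * releases the page and returns 0, so the faulting access is simply
 * retried against the now-unreplicated page.
 */
int page_write_fault_retry(struct page *page);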
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7e39ffceb566..161af608b7e2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2287,7 +2287,8 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	if (mapping && mapping_cap_account_dirty(mapping)) {
+	//if (mapping && mapping_cap_account_dirty(mapping)) {
+	if (mapping) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
 		 *
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5e8eadd71bac..b9b6bef90169 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -582,6 +582,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
+again:
 	spin_lock_irq(&mapping->tree_lock);
 	/*
 	 * The non racy check for a busy page.
@@ -640,7 +641,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		if (reclaimed && page_is_file_cache(page) &&
 		    !mapping_exiting(mapping))
 			shadow = workingset_eviction(mapping, page);
-		__delete_from_page_cache(page, shadow);
+		if (PageReplicated(page)) {
+			if (reclaim_replicated_page(mapping, page))
+				goto again;
+		} else
+			__delete_from_page_cache(page, shadow);
 		spin_unlock_irq(&mapping->tree_lock);
 
 		if (freepage != NULL)
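Since the again: label sits before spin_lock_irq(&mapping->tree_lock), the retry only avoids a double lock if reclaim_replicated_page() releases the tree lock before asking for another pass. That locking behaviour is an assumption here; the implementation lives in mm/replication.c, which this diff does not include.

/*
 * Assumed contract, inferred from the __remove_mapping() hunk above:
 * called with mapping->tree_lock held; when it returns non-zero it has
 * torn down the page's replicas and dropped the lock, so the caller
 * jumps back to "again:" and re-takes it before re-checking the page.
 */
int reclaim_replicated_page(struct address_space *mapping, struct page *page);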
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4f5cd974e11a..6af8ea00cbef 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -708,6 +708,7 @@ const char * const vmstat_text[] = {
708 "nr_anon_pages", 708 "nr_anon_pages",
709 "nr_mapped", 709 "nr_mapped",
710 "nr_file_pages", 710 "nr_file_pages",
711 "nr_repl_pages",
711 "nr_dirty", 712 "nr_dirty",
712 "nr_writeback", 713 "nr_writeback",
713 "nr_slab_reclaimable", 714 "nr_slab_reclaimable",
@@ -760,6 +761,9 @@ const char * const vmstat_text[] = {
760 "pgfault", 761 "pgfault",
761 "pgmajfault", 762 "pgmajfault",
762 763
764 "pgreplicated",
765 "pgreplicazap",
766
763 TEXTS_FOR_ZONES("pgrefill") 767 TEXTS_FOR_ZONES("pgrefill")
764 TEXTS_FOR_ZONES("pgsteal_kswapd") 768 TEXTS_FOR_ZONES("pgsteal_kswapd")
765 TEXTS_FOR_ZONES("pgsteal_direct") 769 TEXTS_FOR_ZONES("pgsteal_direct")