 arch/arm/kernel/irq.c          |   4
 include/linux/mm.h             |   2
 include/linux/mm_types.h       |   7
 include/linux/mmzone.h         |   1
 include/linux/page-flags.h     |   6
 include/linux/vm_event_item.h  |   1
 include/litmus/replicate_lib.h |   5
 init/main.c                    |   1
 litmus/bank_proc.c             |   7
 litmus/cache_proc.c            |  26
 litmus/litmus.c                | 103
 litmus/polling_reservations.c  |   2
 litmus/reservation.c           |   4
 litmus/sched_mc2.c             |   4
 mm/Makefile                    |   2
 mm/debug.c                     |   1
 mm/filemap.c                   | 114
 mm/internal.h                  |  10
 mm/memory.c                    |  19
 mm/migrate.c                   | 127
 mm/page-writeback.c            |   3
 mm/page_alloc.c                |   2
 mm/vmscan.c                    |   7
 mm/vmstat.c                    |   4
 24 files changed, 77 insertions(+), 385 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 350f188c92d2..720b45e232f2 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -44,6 +44,8 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
+#include <litmus/cache_proc.h>
+
 unsigned long irq_err_count;
 
 int arch_show_interrupts(struct seq_file *p, int prec)
@@ -66,7 +68,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  */
 void handle_IRQ(unsigned int irq, struct pt_regs *regs)
 {
+	enter_irq_mode();
 	__handle_domain_irq(NULL, irq, false, regs);
+	exit_irq_mode();
 }
 
 /*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 55df1f8bf4cb..0755b9fd03a7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2200,7 +2200,5 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
-extern void replication_init(void);
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e8011e019864..8d37e26a1007 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,7 +12,6 @@
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
-#include <linux/radix-tree.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 
@@ -546,10 +545,4 @@ typedef struct {
 	unsigned long val;
 } swp_entry_t;
 
-struct pcache_desc {
-	struct page *master;
-	cpumask_t cpus_present;
-	struct radix_tree_root page_tree;
-};
-
 #endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index abc63c255d44..54d74f6eb233 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -126,7 +126,6 @@ enum zone_stat_item {
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
-	NR_REPL_PAGES,
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
 	NR_SLAB_RECLAIMABLE,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 8b0d7723f3c9..f34e040b34e9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -97,7 +97,6 @@ enum pageflags {
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 	PG_unevictable,		/* Page is "unevictable" */
-	PG_replicated,		/* Page is replicated pagecache */
 #ifdef CONFIG_MMU
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
@@ -290,11 +289,6 @@ PAGEFLAG_FALSE(HWPoison)
 #define __PG_HWPOISON 0
 #endif
 
-#define PageReplicated(page) test_bit(PG_replicated, &(page)->flags)
-#define __SetPageReplicated(page) do { BUG_ON(PageDirty(page) || PageWriteback(page)); __set_bit(PG_replicated, &(page)->flags); } while (0)
-#define SetPageReplicated(page) do { BUG_ON(PageDirty(page) || PageWriteback(page)); set_bit(PG_replicated, &(page)->flags); } while (0)
-#define ClearPageReplicated(page) clear_bit(PG_replicated, &(page)->flags)
-
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 62820318d8ad..9246d32dc973 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -25,7 +25,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
 		PGFREE, PGACTIVATE, PGDEACTIVATE,
 		PGFAULT, PGMAJFAULT,
-		PGREPLICATED, PGREPLICAZAP,
 		FOR_ALL_ZONES(PGREFILL),
 		FOR_ALL_ZONES(PGSTEAL_KSWAPD),
 		FOR_ALL_ZONES(PGSTEAL_DIRECT),
diff --git a/include/litmus/replicate_lib.h b/include/litmus/replicate_lib.h
index 773468497416..186837be08a5 100644
--- a/include/litmus/replicate_lib.h
+++ b/include/litmus/replicate_lib.h
@@ -5,11 +5,12 @@
 #include <linux/mm_types.h>
 #include <linux/mm_inline.h>
 
+/* Data structure for the "master" list */
 struct shared_lib_page {
 	struct page *master_page;
-	struct page *r_page[NR_CPUS];
+	struct page *r_page[NR_CPUS+1];
 	unsigned long int master_pfn;
-	unsigned long int r_pfn[NR_CPUS];
+	unsigned long int r_pfn[NR_CPUS+1];
 	struct list_head list;
 };
 
diff --git a/init/main.c b/init/main.c
index 88917d93fbe4..2a89545e0a5d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -628,7 +628,6 @@ asmlinkage __visible void __init start_kernel(void)
 	kmemleak_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
-	replication_init();
 	if (late_time_init)
 		late_time_init();
 	sched_clock_init();
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index 6103611211ce..df9f5730ed05 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -24,10 +24,12 @@
 // This Address Decoding is used in imx6-sabredsd platform
 #define BANK_MASK  0x38000000
 #define BANK_SHIFT  27
+
 #define CACHE_MASK  0x0000f000
 #define CACHE_SHIFT 12
 
-#define PAGES_PER_COLOR 1024
+#define PAGES_PER_COLOR 2000
+#define PAGES_PER_COLOR_HALF 1000
 unsigned int NUM_PAGE_LIST;  //8*16
 
 unsigned int number_banks;
@@ -245,7 +247,8 @@ static int do_add_pages(void)
 		counter[color]++;
 		// printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
 		//show_nr_pages();
-		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
+		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
+		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
 			//if ( PAGES_PER_COLOR && color>=16*2) {
 			add_page_to_color_list(page);
 			// printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 49e98f6ed86a..87077d4366dc 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -357,17 +357,9 @@ int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
 		writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
 			       i * L2X0_LOCKDOWN_STRIDE);
 		}
-/*
-	for (i = 0; i < nr_lockregs; i++) {
-		barrier();
-		mem_lock(UNLOCK_ALL, i);
-		barrier();
-		//writel_relaxed(nr_unlocked_way[16], ld_d_reg(i));
-		//writel_relaxed(nr_unlocked_way[16], ld_i_reg(i));
-	}
-*/
+
 	}
-	printk("LOCK_ALL HANDLER\n");
+
 	local_irq_save(flags);
 	print_lockdown_registers(smp_processor_id());
 	l2c310_flush_all();
@@ -379,9 +371,6 @@ out:
 
 void cache_lockdown(u32 lock_val, int cpu)
 {
-	//unsigned long flags;
-	//raw_spin_lock_irqsave(&cache_lock, flags);
-
 	__asm__ __volatile__ (
 "	str	%[lockval], [%[dcachereg]]\n"
 "	str	%[lockval], [%[icachereg]]\n"
@@ -390,8 +379,6 @@ void cache_lockdown(u32 lock_val, int cpu)
 	[icachereg] "r" (ld_i_reg(cpu)),
 	[lockval] "r" (lock_val)
 	: "cc");
-
-	//raw_spin_unlock_irqrestore(&cache_lock, flags);
 }
 
 void do_partition(enum crit_level lv, int cpu)
@@ -421,14 +408,12 @@ void do_partition(enum crit_level lv, int cpu)
 
 	}
 	barrier();
-	//cache_lockdown(regs, cpu);
+
 	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
 	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
 	barrier();
 
 	raw_spin_unlock_irqrestore(&cache_lock, flags);
-
-	flush_cache(0);
 }
 
 void lock_cache(int cpu, u32 val)
@@ -544,7 +529,6 @@ void inline enter_irq_mode(void)
 
 	if (os_isolation == 0)
 		return;
-
 	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
 	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
 
@@ -1011,7 +995,6 @@ int setup_flusher_array(void)
 		ret = -EINVAL;
 		goto out;
 	}
-
 	for (way = 0; way < MAX_NR_WAYS; way++) {
 		void **flusher_color_arr;
 		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
@@ -1023,7 +1006,7 @@ int setup_flusher_array(void)
 	}
 
 		flusher_pages[way] = flusher_color_arr;
-
+		/* This is ugly. */
 		for (color = 0; color < MAX_NR_COLORS; color++) {
 			int node;
 			switch (color) {
@@ -1090,6 +1073,7 @@ int setup_flusher_array(void)
 			}
 		}
 	}
+
 out:
 	return ret;
 out_free:
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 11e4c5da9c10..1f5e49114b2c 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -352,28 +352,32 @@ extern int isolate_lru_page(struct page *page);
 extern void putback_movable_page(struct page *page);
 extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
 
-DECLARE_PER_CPU(struct list_head, shared_lib_page_list);
 #define INVALID_PFN (0xffffffff)
 LIST_HEAD(shared_lib_pages);
-//struct list_head shared_lib_pages = LIST_HEAD_INIT(shared_lib_pages);
+
 EXPORT_SYMBOL(shared_lib_pages);
 
+/* Reallocate pages of a task
+ * Private pages - Migrate to a new page.
+ * Shared pages - Use a replica. Make a replica if necessary.
+ * @cpu : CPU id of the calling task
+ * returns the number of pages that is not moved.
+ */
 asmlinkage long sys_set_page_color(int cpu)
 {
 	long ret = 0;
-	//struct page *page_itr = NULL;
 	struct vm_area_struct *vma_itr = NULL;
 	int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0, nr_not_migrated = 0;
 	unsigned long node;
 	enum crit_level lv;
 	struct mm_struct *mm;
-	//struct list_head *shared_pagelist = this_cpu_ptr(&shared_lib_page_list);
 
 	LIST_HEAD(pagelist);
 	LIST_HEAD(task_shared_pagelist);
 
 	migrate_prep();
 
+	/* Find the current mm_struct */
 	rcu_read_lock();
 	get_task_struct(current);
 	rcu_read_unlock();
@@ -383,16 +387,14 @@ asmlinkage long sys_set_page_color(int cpu)
 	down_read(&mm->mmap_sem);
 	TRACE_TASK(current, "SYSCALL set_page_color\n");
 	vma_itr = mm->mmap;
+	/* Iterate all vm_area_struct */
 	while (vma_itr != NULL) {
 		unsigned int num_pages = 0, i;
 		struct page *old_page = NULL;
 		int pages_in_vma = 0;
 
 		num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
-		// print vma flags
-		//printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags);
-		//printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE);
-		//printk(KERN_INFO "vm_page_prot: 0x%lx\n", vma_itr->vm_page_prot);
+		/* Traverse all pages in vm_area_struct */
 		for (i = 0; i < num_pages; i++) {
 			old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
 
@@ -410,14 +412,13 @@ asmlinkage long sys_set_page_color(int cpu)
 			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
 			pages_in_vma++;
 
-// for simple debug
+			/* Conditions for replicable pages */
 			if (page_count(old_page) > 2 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
-			//if (page_count(old_page) < 10 && page_count(old_page) > 3 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
 				struct shared_lib_page *lib_page;
 				int is_exist = 0;
 
-				/* update PSL list */
-				/* check if this page is in the PSL list */
+				/* Update PSL (Per-core shared library (master)) list */
+				/* Check if this page is in the PSL list */
 				rcu_read_lock();
 				list_for_each_entry(lib_page, &shared_lib_pages, list)
 				{
@@ -432,10 +433,8 @@ asmlinkage long sys_set_page_color(int cpu)
 					int cpu_i;
 					lib_page = kmalloc(sizeof(struct shared_lib_page), GFP_KERNEL);
 					lib_page->master_page = old_page;
-					//lib_page->r_page = NULL;
 					lib_page->master_pfn = page_to_pfn(old_page);
-					//lib_page->r_pfn = INVALID_PFN;
-					for (cpu_i = 0; cpu_i < NR_CPUS; cpu_i++) {
+					for (cpu_i = 0; cpu_i < NR_CPUS+1; cpu_i++) {
 						lib_page->r_page[cpu_i] = NULL;
 						lib_page->r_pfn[cpu_i] = INVALID_PFN;
 					}
@@ -452,9 +451,8 @@ asmlinkage long sys_set_page_color(int cpu)
 					list_add_tail(&old_page->lru, &task_shared_pagelist);
 					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
 					nr_shared_pages++;
-					TRACE_TASK(current, "SHARED isolate_lru_page success\n");
 				} else {
-					TRACE_TASK(current, "SHARED isolate_lru_page failed\n");
+					TRACE_TASK(current, "isolate_lru_page for a shared page failed\n");
 					nr_failed++;
 				}
 				put_page(old_page);
@@ -466,34 +464,28 @@ asmlinkage long sys_set_page_color(int cpu)
 					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
 					nr_pages++;
 				} else {
-					TRACE_TASK(current, "isolate_lru_page failed\n");
+					TRACE_TASK(current, "isolate_lru_page for a private page failed\n");
 					nr_failed++;
 				}
-				//printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
 				put_page(old_page);
-				//TRACE_TASK(current, "PRIVATE\n");
 			}
 		}
 		TRACE_TASK(current, "PAGES_IN_VMA = %d size = %d KB\n", pages_in_vma, pages_in_vma*4);
 		vma_itr = vma_itr->vm_next;
 	}
-
-	//list_for_each_entry(page_itr, &pagelist, lru) {
-//		printk(KERN_INFO "B _mapcount = %d, _count = %d\n", page_mapcount(page_itr), page_count(page_itr));
-//	}
 
 	ret = 0;
 	if (!is_realtime(current))
-		lv = 1;
+		node = 8;
 	else {
 		lv = tsk_rt(current)->mc2_data->crit;
+		if (cpu == -1)
+			node = 8;
+		else
+			node = cpu*2 + lv;
 	}
 
-	if (cpu == -1)
-		node = 8;
-	else
-		node = cpu*2 + lv;
-
+	/* Migrate private pages */
 	if (!list_empty(&pagelist)) {
 		ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
 		TRACE_TASK(current, "%ld pages not migrated.\n", ret);
@@ -502,52 +494,24 @@ asmlinkage long sys_set_page_color(int cpu)
 			putback_movable_pages(&pagelist);
 		}
 	}
-/*
-	{
-		struct list_head *pos, *q;
-		list_for_each_safe(pos, q, &task_shared_pagelist) {
-			struct page *p_entry = NULL;
-			struct shared_lib_page *lib_desc = NULL;
-
-			p_entry = list_entry(pos, struct page, lru);
-			list_for_each_entry(lib_desc, &shared_lib_pages, list) {
-				if (p_entry == lib_desc->r_page) {
-					list_del(pos);
-				}
-			}
-		}
-	}
-*/
+
+	/* Replicate shared pages */
 	if (!list_empty(&task_shared_pagelist)) {
-		if (node != 8)
-			ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
-		else
-			ret = nr_shared_pages;
+		ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
 		TRACE_TASK(current, "%ld shared pages not migrated.\n", ret);
 		nr_not_migrated += ret;
 		if (ret) {
 			putback_movable_pages(&task_shared_pagelist);
 		}
 	}
 
-	/* handle sigpage and litmus ctrl_page */
-/*	vma_itr = current->mm->mmap;
-	while (vma_itr != NULL) {
-		if (vma_itr->vm_start == tsk_rt(current)->addr_ctrl_page) {
-			TRACE("litmus ctrl_page = %08x\n", vma_itr->vm_start);
-			vma_itr->vm_page_prot = PAGE_SHARED;
-			break;
-		}
-		vma_itr = vma_itr->vm_next;
-	}
-*/
 	up_read(&mm->mmap_sem);
 
-
-	TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
+	TRACE_TASK(current, "nr_pages = %d nr_failed = %d nr_not_migrated = %d\n", nr_pages, nr_failed, nr_not_migrated);
 	printk(KERN_INFO "node = %ld, nr_private_pages = %d, nr_shared_pages = %d, nr_failed_to_isolate_lru = %d, nr_not_migrated = %d\n", node, nr_pages, nr_shared_pages, nr_failed, nr_not_migrated);
 
 	flush_cache(1);
+
 /* for debug START */
 	TRACE_TASK(current, "PSL PAGES\n");
 	{
@@ -556,11 +520,11 @@ asmlinkage long sys_set_page_color(int cpu)
 		rcu_read_lock();
 		list_for_each_entry(lpage, &shared_lib_pages, list)
 		{
-			TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx, %05lx, %05lx, %05lx\n", lpage->master_pfn, lpage->r_pfn[0], lpage->r_pfn[1], lpage->r_pfn[2], lpage->r_pfn[3]);
+			TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx, %05lx, %05lx, %05lx, %05lx\n", lpage->master_pfn, lpage->r_pfn[0], lpage->r_pfn[1], lpage->r_pfn[2], lpage->r_pfn[3], lpage->r_pfn[4]);
 		}
 		rcu_read_unlock();
 	}
-
+#if 0
 	TRACE_TASK(current, "AFTER migration\n");
 	down_read(&mm->mmap_sem);
 	vma_itr = mm->mmap;
@@ -595,8 +559,9 @@ asmlinkage long sys_set_page_color(int cpu)
 	}
 	up_read(&mm->mmap_sem);
 /* for debug FIN. */
+#endif
 
-	return ret;
+	return nr_not_migrated;
 }
 
 /* sys_test_call() is a test system call for developing */
@@ -644,7 +609,7 @@ asmlinkage long sys_test_call(unsigned int param)
 				continue;
 			}
 
-			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
+			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping));
 			put_page(old_page);
 		}
 		vma_itr = vma_itr->vm_next;
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index 4a2fee575127..06bc1f5b9267 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -4,6 +4,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+#define TRACE(fmt, args...) do {} while (false)
+#define TRACE_TASK(fmt, args...) do {} while (false)
 
 static void periodic_polling_client_arrives(
 	struct reservation* res,
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 07e38cb7d138..cdda89d4208f 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -4,8 +4,8 @@
 #include <litmus/litmus.h>
 #include <litmus/reservation.h>
 
-//#define TRACE(fmt, args...) do {} while (false)
-//#define TRACE_TASK(fmt, args...) do {} while (false)
+#define TRACE(fmt, args...) do {} while (false)
+#define TRACE_TASK(fmt, args...) do {} while (false)
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 5c88a36aacec..588f78e2107f 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -27,8 +27,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
-//#define TRACE(fmt, args...) do {} while (false)
-//#define TRACE_TASK(fmt, args...) do {} while (false)
+#define TRACE(fmt, args...) do {} while (false)
+#define TRACE_TASK(fmt, args...) do {} while (false)
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
diff --git a/mm/Makefile b/mm/Makefile
index 98d28edd36a5..98c4eaeabdcb 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -56,7 +56,7 @@ obj-$(CONFIG_KASAN)	+= kasan/
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_MEMTEST)		+= memtest.o
-obj-$(CONFIG_MIGRATION) += migrate.o replication.o
+obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
 obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
diff --git a/mm/debug.c b/mm/debug.c
index dbc3ea81dde7..3eb3ac2fcee7 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -36,7 +36,6 @@ static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_reclaim,		"reclaim"	},
 	{1UL << PG_swapbacked,		"swapbacked"	},
 	{1UL << PG_unevictable,		"unevictable"	},
-	{1UL << PG_replicated,		"replicated"	},
 #ifdef CONFIG_MMU
 	{1UL << PG_mlocked,		"mlocked"	},
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 93853e337f07..6bf5e42d560a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -36,9 +36,6 @@
 #include <linux/rmap.h>
 #include "internal.h"
 
-#include <litmus/litmus.h>
-#include <litmus/mc2_common.h>
-
 #define CREATE_TRACE_POINTS
 #include <trace/events/filemap.h>
 
@@ -976,18 +973,6 @@ repeat:
 	page = radix_tree_deref_slot(pagep);
 	if (unlikely(!page))
 		goto out;
-	if (is_pcache_desc(page)) {
-		struct pcache_desc *pcd;
-printk(KERN_INFO "PCACHE_DESC\n");
-		pcd = ptr_to_pcache_desc(page);
-		page = pcd->master;
-		page_cache_get_speculative(page);
-
-		unreplicate_pcache(mapping, page->index, 0);
-
-		goto out;
-	}
-
 	if (radix_tree_exception(page)) {
 		if (radix_tree_deref_retry(page))
 			goto repeat;
@@ -1186,20 +1171,6 @@ repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
 			continue;
-
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-			printk(KERN_INFO "PCACHE_DESC\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page))
 				goto restart;
@@ -1263,20 +1234,6 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-			printk(KERN_INFO "PCACHE_DESC\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1304,7 +1261,6 @@ repeat:
 			goto repeat;
 		}
 
-export:
 		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
@@ -1346,20 +1302,6 @@ repeat:
 		if (unlikely(!page))
 			break;
 
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-			printk(KERN_INFO "PCACHE_DESC\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1385,7 +1327,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-export:
+
 		/*
 		 * must check mapping and index after taking the ref.
 		 * otherwise we can get both false positives and false
@@ -1436,20 +1378,6 @@ repeat:
 		if (unlikely(!page))
 			continue;
 
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-			printk(KERN_INFO "PCACHE_DESC BUG!!!!!!!!!!\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			page_cache_get_speculative(page);
-
-			unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page)) {
 				/*
@@ -1481,7 +1409,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-export:
+
 		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
@@ -1557,11 +1485,7 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
 		cond_resched();
 find_page:
-		if (is_realtime(current))
-			page = find_get_page_readonly(mapping, index);
-		else
-			page = find_get_page(mapping, index);
-
+		page = find_get_page(mapping, index);
 		if (!page) {
 			page_cache_sync_readahead(mapping,
 					ra, filp,
@@ -1713,8 +1637,7 @@ readpage:
 			unlock_page(page);
 		}
 
-		page_cache_release(page);
-		goto find_page;
+		goto page_ok;
 
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
@@ -1958,11 +1881,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/*
 	 * Do we have something in the page cache already?
 	 */
-	if ((vmf->flags & FAULT_FLAG_WRITE) || !is_realtime(current))
-		page = find_get_page(mapping, offset);
-	else
-		page = find_get_page_readonly(mapping, offset);
-
+	page = find_get_page(mapping, offset);
 	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
 		/*
 		 * We found the page, so try async readahead before
@@ -1976,10 +1895,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
 retry_find:
-		if ((vmf->flags & FAULT_FLAG_WRITE) || !is_realtime(current))
-			page = find_get_page(mapping, offset);
-		else
-			page = find_get_page_readonly(mapping, offset);
+		page = find_get_page(mapping, offset);
 		if (!page)
 			goto no_cached_page;
 	}
@@ -2087,22 +2003,6 @@ repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
 			goto next;
-
-		if (is_pcache_desc(page)) {
-			struct pcache_desc *pcd;
-
-printk(KERN_INFO "PCACHE_DESC FILE_MAP_PAGES\n");
-
-			pcd = ptr_to_pcache_desc(page);
-			page = pcd->master;
-			if (!page_cache_get_speculative(page))
-				goto repeat;
-
-			//unreplicate_pcache(mapping, page->index, 0);
-
-			goto export;
-		}
-
 		if (radix_tree_exception(page)) {
 			if (radix_tree_deref_retry(page))
 				break;
@@ -2118,7 +2018,7 @@ printk(KERN_INFO "PCACHE_DESC FILE_MAP_PAGES\n");
 			page_cache_release(page);
 			goto repeat;
 		}
-export:
+
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
 				PageHWPoison(page))
diff --git a/mm/internal.h b/mm/internal.h
index ccc349b59d00..a25e359a4039 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -433,14 +433,4 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 #define ALLOC_FAIR		0x100 /* fair zone allocation */
 
-extern int reclaim_replicated_page(struct address_space *mapping,
-				struct page *page);
-extern struct page *find_get_page_readonly(struct address_space *mapping,
-				unsigned long offset);
-extern int is_pcache_desc(void *ptr);
-extern struct pcache_desc *ptr_to_pcache_desc(void *ptr);
-extern void *pcache_desc_to_ptr(struct pcache_desc *pcd);
-extern void unreplicate_pcache(struct address_space *mapping, unsigned long offset, int locked);
-int page_write_fault_retry(struct page *page);
-
 #endif /* __MM_INTERNAL_H */
diff --git a/mm/memory.c b/mm/memory.c
index 1fc358bec6d5..22e037e3364e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2231,24 +2231,15 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * read-only shared pages can get COWed by
 	 * get_user_pages(.write=1, .force=1).
 	 */
-//	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
-	{
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
 
 		pte_unmap_unlock(page_table, ptl);
-
-		if (page_write_fault_retry(old_page)) {
+		tmp = do_page_mkwrite(vma, old_page, address);
+		if (unlikely(!tmp || (tmp &
+			     (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
 			page_cache_release(old_page);
-			return 0;
-		}
-
-		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
-			tmp = do_page_mkwrite(vma, old_page, address);
-			if (unlikely(!tmp || (tmp &
-			    (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-				page_cache_release(old_page);
-				return tmp;
-			}
-		}
+			return tmp;
+		}
 		/*
 		 * Since we dropped the lock we need to revalidate
diff --git a/mm/migrate.c b/mm/migrate.c
index d25cc2c2736d..a2e9cad083d5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -405,7 +405,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count)
 {
-	int expected_count = 1 + extra_count;
 	int prev_count = page_count(page);
 	void **pslot;
 
@@ -415,38 +414,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
 
-	expected_count += 1 + page_has_private(page);
-
-	TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
-/*
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "1\n");
-		return -EAGAIN;
-	}
-
-	if (!page_freeze_refs(page, expected_count)) { // if page_count(page) == expected_count, then set page_count = 0
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "2\n");
-		return -EAGAIN;
-	}
-*/
-	/*
-	 * In the async migration case of moving a page with buffers, lock the
-	 * buffers using trylock before the mapping is moved. If the mapping
-	 * was moved, we later failed to lock the buffers and could not move
-	 * the mapping back due to an elevated page count, we would have to
-	 * block waiting on other references to be dropped.
-	 */
-/*	if (mode == MIGRATE_ASYNC && head &&
-		!buffer_migrate_lock_buffers(head, mode)) {
-		page_unfreeze_refs(page, expected_count);
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "3\n");
-		return -EAGAIN;
-	}
-*/
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
@@ -456,15 +423,11 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		set_page_private(newpage, page_private(page));
 	}
 
-	//radix_tree_replace_slot(pslot, newpage);
-	//radix_tree_replace_slot(pslot, page);
-
 	/*
 	 * Drop cache reference from old page by unfreezing
-	 * to one less reference.
+	 * to the previous reference.
 	 * We know this isn't the last reference.
 	 */
-	//page_unfreeze_refs(page, expected_count - 1);
 	page_unfreeze_refs(page, prev_count);
 
 	/*
@@ -702,7 +665,6 @@ void replicate_page_copy(struct page *newpage, struct page *page)
 	 */
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
-	TRACE_TASK(current, "replicate_page_copy done!\n");
 }
 
 /************************************************************
@@ -742,7 +704,6 @@ int replicate_page(struct address_space *mapping,
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
 	rc = replicate_page_move_mapping(mapping, newpage, page, NULL, mode, extra_count);
-	TRACE_TASK(current, "replicate_page_move_mapping returned %d\n", rc);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
@@ -975,19 +936,9 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
 		rc = migrate_page(mapping, newpage, page, mode);
 	}
 	else if (mapping->a_ops->migratepage) {
-		TRACE_TASK(current, "ops migration callback\n");
-		/*
-		 * Most pages have a mapping and most filesystems provide a
-		 * migratepage callback. Anonymous pages are part of swap
-		 * space which also has its own migratepage callback. This
-		 * is the most common path for page migration.
-		 */
-		//rc = mapping->a_ops->migratepage(mapping,
-		//				newpage, page, mode);
 		rc = replicate_page(mapping, newpage, page, mode, has_replica);
 	}
 	else {
-		TRACE_TASK(current, "fallback function\n");
 		rc = fallback_migrate_page(mapping, newpage, page, mode);
 	}
 
@@ -995,10 +946,8 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
 		newpage->mapping = NULL;
 	} else {
 		if (page_was_mapped) {
-			TRACE_TASK(current, "PAGE_WAS_MAPPED = 1\n");
 			remove_migration_ptes(page, newpage);
 		}
-		//page->mapping = NULL;
 	}
 
 	unlock_page(newpage);
@@ -1178,76 +1127,18 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 
 	if (PageWriteback(page)) {
 		/*
-		 * Only in the case of a full synchronous migration is it
-		 * necessary to wait for PageWriteback. In the async case,
-		 * the retry loop is too short and in the sync-light case,
-		 * the overhead of stalling is too much
+		 * The code of shared library cannot be written.
 		 */
 		BUG();
-		/*
-		if (mode != MIGRATE_SYNC) {
-			rc = -EBUSY;
-			goto out_unlock;
-		}
-		if (!force)
-			goto out_unlock;
-		wait_on_page_writeback(page);
-		*/
 	}
-	/*
-	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
-	 * we cannot notice that anon_vma is freed while we migrates a page.
-	 * This get_anon_vma() delays freeing anon_vma pointer until the end
-	 * of migration. File cache pages are no problem because of page_lock()
-	 * File Caches may use write_page() or lock_page() in migration, then,
-	 * just care Anon page here.
-	 */
+
 	if (PageAnon(page) && !PageKsm(page)) {
-		printk(KERN_INFO "ANON but not KSM\n");
+		/* The shared library pages must be backed by a file. */
 		BUG();
-		/*
-		 * Only page_lock_anon_vma_read() understands the subtleties of
-		 * getting a hold on an anon_vma from outside one of its mms.
-		 */
-/*
-		anon_vma = page_get_anon_vma(page);
-		if (anon_vma) {
-*/
-			/*
-			 * Anon page
-			 */
-/*
-		} else if (PageSwapCache(page)) {
-*/
-			/*
-			 * We cannot be sure that the anon_vma of an unmapped
-			 * swapcache page is safe to use because we don't
-			 * know in advance if the VMA that this page belonged
-			 * to still exists. If the VMA and others sharing the
-			 * data have been freed, then the anon_vma could
-			 * already be invalid.
-			 *
-			 * To avoid this possibility, swapcache pages get
-			 * migrated but are not remapped when migration
-			 * completes
-			 */
-/*		} else {
-			goto out_unlock;
-		}
-*/
 	}
 
 	if (unlikely(isolated_balloon_page(page))) {
 		BUG();
-		/*
-		 * A ballooned page does not need any special attention from
-		 * physical to virtual reverse mapping procedures.
-		 * Skip any attempt to unmap PTEs or to remap swap cache,
-		 * in order to avoid burning cycles at rmap level, and perform
-		 * the page migration right away (proteced by page lock).
-		 */
-		rc = balloon_page_migrate(newpage, page, mode);
-		goto out_unlock;
 	}
 
 	/*
@@ -1273,22 +1164,17 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 
 	/* Establish migration ptes or remove ptes */
 	if (page_mapped(page)) {
-		// ttu_ret = try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		struct rmap_walk_control rwc = {
 			.rmap_one = try_to_unmap_one_only,
 			.arg = (void *)(TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS),
 		};
-
 		ttu_ret = rmap_walk(page, &rwc);
 
 		page_was_mapped = 1;
-		TRACE_TASK(current, "Page %d unmapped from all PTEs\n", page_to_pfn(page));
 	}
 
 skip_unmap:
-	//if (!page_mapped(page)) {
 	if (ttu_ret == SWAP_SUCCESS) {
-		TRACE_TASK(current, "Call copy_to_new_page\n");
 		rc = copy_to_new_page(newpage, page, page_was_mapped, mode, has_replica);
 	} else if (ttu_ret == SWAP_AGAIN)
 		printk(KERN_ERR "rmap_walk returned SWAP_AGAIN\n");
@@ -1418,16 +1304,13 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 		newpage = get_new_page(page, private, &result);
 		if (!newpage)
 			return -ENOMEM;
-		//printk(KERN_ERR "Page %lx allocated\n", page_to_pfn(newpage));
 	} else {
 		newpage = lib_page->r_page[cpu];
 		has_replica = 1;
-		//printk(KERN_ERR "Page %lx found\n", page_to_pfn(newpage));
 	}
 
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
-		TRACE_TASK(current, "page %x _count == 1\n", page_to_pfn(page));
 		goto out;
 	}
 
@@ -1443,7 +1326,6 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 	}
 
 out:
-TRACE_TASK(current, "__unmap_and_copy returned %s\n", rc==MIGRATEPAGE_SUCCESS?"SUCCESS":"FAIL");
 	if (rc != -EAGAIN) {
 		/*
 		 * A page that has been migrated has all references
@@ -1457,7 +1339,6 @@ TRACE_TASK(current, "__unmap_and_copy returned %s\n", rc==MIGRATEPAGE_SUCCESS?"S
 		putback_lru_page(page);
 	}
 
-//TRACE_TASK(current, "old page freed\n");
 	/*
 	 * If migration was not successful and there's a freeing callback, use
 	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 161af608b7e2..7e39ffceb566 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2287,8 +2287,7 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	//if (mapping && mapping_cap_account_dirty(mapping)) {
-	if (mapping) {
+	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
 		 *
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3ffde2a09765..950c002bbb45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -662,7 +662,7 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page)))
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))
-		bad_reason = "non-NULL mapping free_check";
+		bad_reason = "non-NULL mapping";
 	if (unlikely(atomic_read(&page->_count) != 0))
 		bad_reason = "nonzero _count";
 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b9b6bef90169..5e8eadd71bac 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -582,7 +582,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-again:
 	spin_lock_irq(&mapping->tree_lock);
 	/*
 	 * The non racy check for a busy page.
@@ -641,11 +640,7 @@ again:
 		if (reclaimed && page_is_file_cache(page) &&
 		    !mapping_exiting(mapping))
 			shadow = workingset_eviction(mapping, page);
-		if (PageReplicated(page)) {
-			if (reclaim_replicated_page(mapping, page))
-				goto again;
-		} else
-			__delete_from_page_cache(page, shadow);
+		__delete_from_page_cache(page, shadow);
 		spin_unlock_irq(&mapping->tree_lock);
 
 		if (freepage != NULL)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6af8ea00cbef..4f5cd974e11a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -708,7 +708,6 @@ const char * const vmstat_text[] = {
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
-	"nr_repl_pages",
 	"nr_dirty",
 	"nr_writeback",
 	"nr_slab_reclaimable",
@@ -761,9 +760,6 @@ const char * const vmstat_text[] = {
 	"pgfault",
 	"pgmajfault",
 
-	"pgreplicated",
-	"pgreplicazap",
-
 	TEXTS_FOR_ZONES("pgrefill")
 	TEXTS_FOR_ZONES("pgsteal_kswapd")
 	TEXTS_FOR_ZONES("pgsteal_direct")