author     Namhoon Kim <namhoonk@cs.unc.edu>   2016-09-30 02:06:05 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>   2016-09-30 02:06:05 -0400
commit     1735527cd6d215863dae7d4df8c7267d4337bb5d
tree       ab2354eb6b8959f130950374c46f48d1545469e2
parent     87c96270aa53f5c1c67fca941a2a6061178cb0a0
Unmap shared library pages from the calling task only (seems to work)

Replace the try_to_unmap() call in __unmap_and_copy() with an rmap_walk() that uses the new try_to_unmap_one_only() callback, so a shared library page is unmapped only from the current task's address space before it is copied to its replica. Record the replica only on MIGRATEPAGE_SUCCESS, put the old page back on the LRU once migration is no longer returning -EAGAIN, and print PFNs in hex in the trace messages.
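In outline, the change drives rmap_walk() with a custom rmap_one callback instead of calling try_to_unmap(), so only mappings that belong to the calling task's mm are torn down while every other task keeps its original mapping of the shared page. Below is a condensed sketch of that control flow; it reuses kernel-internal symbols from this tree (rmap_walk(), struct rmap_walk_control, get_task_mm(), try_to_unmap_one_entry(), the TTU_* flags), and the helper names unmap_one_if_current() and unmap_shared_page_from_current() are illustrative only, not the names used in the patch:

/* Sketch only: mirrors the patch below, not a buildable unit on its own. */
static int unmap_one_if_current(struct page *page, struct vm_area_struct *vma,
				unsigned long address, void *arg)
{
	struct mm_struct *current_mm = get_task_mm(current);
	int ret = SWAP_AGAIN;	/* SWAP_AGAIN keeps the rmap walk going */

	if (current_mm && vma->vm_mm == current_mm)
		/* same PTE teardown as try_to_unmap_one(), limited to this VMA */
		ret = try_to_unmap_one_entry(page, vma, address, arg);

	if (current_mm)
		mmput(current_mm);	/* drop the reference taken by get_task_mm() */
	return ret;
}

static int unmap_shared_page_from_current(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = unmap_one_if_current,
		.arg = (void *)(TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS),
	};

	/* Visits every VMA that maps @page; only the caller's PTEs are removed. */
	return rmap_walk(page, &rwc);
}

In the patch itself, the mm check lives in try_to_unmap_one_only() (mm/rmap.c) and the rmap_walk() call replaces try_to_unmap() in __unmap_and_copy() (mm/migrate.c); __unmap_and_copy() only proceeds to copy_to_new_page() when the walk returns SWAP_SUCCESS.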
-rw-r--r--  include/linux/rmap.h            |   3
-rw-r--r--  include/litmus/replicate_lib.h  |   4
-rw-r--r--  litmus/litmus.c                 |  24
-rw-r--r--  mm/migrate.c                    |  55
-rw-r--r--  mm/page_alloc.c                 |   2
-rw-r--r--  mm/rmap.c                       | 166
6 files changed, 218 insertions(+), 36 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c89c53a113a8..7c90e029c7c6 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -188,7 +188,8 @@ int page_referenced(struct page *, int is_locked,
 #define TTU_ACTION(x)	((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
-
+int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
+			unsigned long address, void *arg);
 /*
  * Used by uprobes to replace a userspace page safely
  */
diff --git a/include/litmus/replicate_lib.h b/include/litmus/replicate_lib.h
index 98bfb9707144..16db7d81b66b 100644
--- a/include/litmus/replicate_lib.h
+++ b/include/litmus/replicate_lib.h
@@ -8,8 +8,8 @@
 struct shared_lib_page {
 	struct page *master_page;
 	struct page *r_page;
-	unsigned long master_pfn;
-	unsigned long r_pfn;
+	unsigned long int master_pfn;
+	unsigned long int r_pfn;
 	struct list_head list;
 };
 
diff --git a/litmus/litmus.c b/litmus/litmus.c
index d31138c9b9a6..6088de312bb5 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -407,10 +407,12 @@ asmlinkage long sys_set_page_color(int cpu)
 				continue;
 			}
 
-			TRACE_TASK(current, "addr: %08x, pfn: %ld, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
+			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
 			pages_in_vma++;
 
+			// for simple debug
 			if (page_count(old_page) > 2 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
+			//if (page_count(old_page) < 10 && page_count(old_page) > 3 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
 				struct shared_lib_page *lib_page;
 				int is_exist = 0;
 
@@ -433,10 +435,10 @@ asmlinkage long sys_set_page_color(int cpu)
 					lib_page->master_pfn = page_to_pfn(old_page);
 					lib_page->r_pfn = INVALID_PFN;
 					list_add_tail(&lib_page->list, &shared_lib_pages);
-					TRACE_TASK(current, "NEW PAGE %ld ADDED.\n", lib_page->master_pfn);
+					TRACE_TASK(current, "NEW PAGE %05lx ADDED.\n", lib_page->master_pfn);
 				}
 				else {
-					TRACE_TASK(current, "FOUND PAGE %ld in the list.\n", lib_page->master_pfn);
+					TRACE_TASK(current, "FOUND PAGE %05lx in the list.\n", lib_page->master_pfn);
 				}
 
 				/* add to task_shared_pagelist */
@@ -445,7 +447,7 @@ asmlinkage long sys_set_page_color(int cpu)
 					list_add_tail(&old_page->lru, &task_shared_pagelist);
 					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
 					nr_shared_pages++;
-					TRACE_TASK(current, "SHARED isolate_lur_page success\n");
+					TRACE_TASK(current, "SHARED isolate_lru_page success\n");
 				} else {
 					TRACE_TASK(current, "SHARED isolate_lru_page failed\n");
 				}
@@ -459,7 +461,6 @@ asmlinkage long sys_set_page_color(int cpu)
 					nr_pages++;
 				} else {
 					TRACE_TASK(current, "isolate_lru_page failed\n");
-					TRACE_TASK(current, "page_lru = %d PageLRU = %d\n", page_lru(old_page), PageLRU(old_page));
 					nr_failed++;
 				}
 				//printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
@@ -546,7 +547,7 @@ asmlinkage long sys_set_page_color(int cpu)
 		rcu_read_lock();
 		list_for_each_entry(lpage, &shared_lib_pages, list)
 		{
-			TRACE_TASK(current, "master_PFN = %ld r_PFN = %ld PageSwapCache=%d\n", lpage->master_pfn, lpage->r_pfn, PageSwapCache(lpage->master_page));
+			TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx PageSwapCache=%d\n", lpage->master_pfn, lpage->r_pfn, PageSwapCache(lpage->master_page));
 		}
 		rcu_read_unlock();
 	}
@@ -577,7 +578,7 @@ asmlinkage long sys_set_page_color(int cpu)
 				continue;
 			}
 
-			TRACE_TASK(current, "addr: %08x, pfn: %ld, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
+			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
 			put_page(old_page);
 		}
 
@@ -593,7 +594,6 @@ asmlinkage long sys_set_page_color(int cpu)
 asmlinkage long sys_test_call(unsigned int param)
 {
 	long ret = 0;
-	unsigned long flags;
 	struct vm_area_struct *vma_itr = NULL;
 
 	TRACE_CUR("test_call param = %d\n", param);
@@ -604,7 +604,7 @@ asmlinkage long sys_test_call(unsigned int param)
 		while (vma_itr != NULL) {
 			int i, num_pages;
 			struct page* old_page;
-			TRACE_TASK(current, "--------------------------------------------\n");
+			TRACE_TASK(current, "------------------------------------------------------\n");
 			TRACE_TASK(current, "vm_start : %lx\n", vma_itr->vm_start);
 			TRACE_TASK(current, "vm_end : %lx\n", vma_itr->vm_end);
 			TRACE_TASK(current, "vm_flags : %lx\n", vma_itr->vm_flags);
@@ -635,12 +635,12 @@ asmlinkage long sys_test_call(unsigned int param)
 					continue;
 				}
 
-				TRACE_TASK(current, "addr: %08x, pfn: %ld, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
+				TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
 				put_page(old_page);
 			}
 			vma_itr = vma_itr->vm_next;
 		}
-		printk(KERN_INFO "--------------------------------------------\n");
+		TRACE_TASK(current, "------------------------------------------------------\n");
 		up_read(&current->mm->mmap_sem);
 	}
 	else if (param == 1) {
diff --git a/mm/migrate.c b/mm/migrate.c
index d135547b3a3f..dbb46068a3f3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -406,6 +406,7 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		int extra_count)
 {
 	int expected_count = 1 + extra_count;
+	int prev_count = page_count(page);
 	void **pslot;
 
 	BUG_ON(!mapping);
@@ -417,7 +418,7 @@ int replicate_page_move_mapping(struct address_space *mapping,
 	expected_count += 1 + page_has_private(page);
 
 	TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
-
+/*
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
@@ -425,12 +426,12 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		return -EAGAIN;
 	}
 
-	if (!page_freeze_refs(page, expected_count)) {
+	if (!page_freeze_refs(page, expected_count)) { // if page_count(page) == expected_count, then set page_count = 0
 		spin_unlock_irq(&mapping->tree_lock);
 		TRACE_TASK(current, "2\n");
 		return -EAGAIN;
 	}
-
+*/
 	/*
 	 * In the async migration case of moving a page with buffers, lock the
 	 * buffers using trylock before the mapping is moved. If the mapping
@@ -455,7 +456,7 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		set_page_private(newpage, page_private(page));
 	}
 
-	radix_tree_replace_slot(pslot, newpage);
+	//radix_tree_replace_slot(pslot, newpage);
 	//radix_tree_replace_slot(pslot, page);
 
 	/*
@@ -463,7 +464,8 @@ int replicate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_unfreeze_refs(page, expected_count - 1);
+	//page_unfreeze_refs(page, expected_count - 1);
+	page_unfreeze_refs(page, prev_count);
 
 	/*
 	 * If moved to a different zone then also account
@@ -738,7 +740,7 @@ int replicate_page(struct address_space *mapping,
 	int rc, extra_count = 0;
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
-	//extra_count = page_count(page) - 2;
+
 	rc = replicate_page_move_mapping(mapping, newpage, page, NULL, mode, extra_count);
 	TRACE_TASK(current, "replicate_page_move_mapping returned %d\n", rc);
 	if (rc != MIGRATEPAGE_SUCCESS)
@@ -1147,6 +1149,7 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 			int force, enum migrate_mode mode, int has_replica)
 {
 	int rc = -EAGAIN;
+	int ttu_ret = SWAP_AGAIN;
 	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 
@@ -1270,17 +1273,27 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 
 	/* Establish migration ptes or remove ptes */
 	if (page_mapped(page)) {
-		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		// ttu_ret = try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		struct rmap_walk_control rwc = {
+			.rmap_one = try_to_unmap_one_only,
+			.arg = (void *)(TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS),
+		};
+
+		ttu_ret = rmap_walk(page, &rwc);
+
 		page_was_mapped = 1;
 		TRACE_TASK(current, "Page %d unmapped from all PTEs\n", page_to_pfn(page));
 	}
 
 skip_unmap:
-	if (!page_mapped(page)) {
+	//if (!page_mapped(page)) {
+	if (ttu_ret == SWAP_SUCCESS) {
 		TRACE_TASK(current, "Call copy_to_new_page\n");
 		rc = copy_to_new_page(newpage, page, page_was_mapped, mode, has_replica);
-	}
+	} else if (ttu_ret == SWAP_AGAIN)
+		printk(KERN_ERR "rmap_walk returned SWAP_AGAIN\n");
+	else
+		printk(KERN_ERR "rmap_walk failed\n");
 
 	if (rc && page_was_mapped)
 		remove_migration_ptes(page, page);
@@ -1399,20 +1412,22 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 	rcu_read_unlock();
 
 	if (is_exist_in_psl)
-		TRACE_TASK(current, "Page %ld exists in PSL list\n", lib_page->master_pfn);
+		TRACE_TASK(current, "Page %x exists in PSL list\n", lib_page->master_pfn);
 
 	if (lib_page->r_page == NULL) {
 		newpage = get_new_page(page, private, &result);
 		if (!newpage)
 			return -ENOMEM;
+		printk(KERN_ERR "Page %lx allocated\n", page_to_pfn(newpage));
 	} else {
 		newpage = lib_page->r_page;
 		has_replica = 1;
+		printk(KERN_ERR "Page %lx found\n", page_to_pfn(newpage));
 	}
 
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
-		TRACE_TASK(current, "page %ld _count == 1\n", page_to_pfn(page));
+		TRACE_TASK(current, "page %x _count == 1\n", page_to_pfn(page));
 		goto out;
 	}
 
@@ -1422,25 +1437,25 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 
 	rc = __unmap_and_copy(page, newpage, force, mode, has_replica);
 
-	if (has_replica == 0) {
+	if (has_replica == 0 && rc == MIGRATEPAGE_SUCCESS) {
 		lib_page->r_page = newpage;
 		lib_page->r_pfn = page_to_pfn(newpage);
 	}
 
 out:
-TRACE_TASK(current, "__unmap_and_copy returned %d\n", rc);
-//	if (rc != -EAGAIN) {
+TRACE_TASK(current, "__unmap_and_copy returned %s\n", rc==MIGRATEPAGE_SUCCESS?"SUCCESS":"FAIL");
+	if (rc != -EAGAIN) {
 		/*
 		 * A page that has been migrated has all references
 		 * removed and will be freed. A page that has not been
 		 * migrated will have kepts its references and be
 		 * restored.
 		 */
-//		list_del(&page->lru);
-//		dec_zone_page_state(page, NR_ISOLATED_ANON +
-//				page_is_file_cache(page));
-//		putback_lru_page(page);
-//	}
+		list_del(&page->lru);
+		dec_zone_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_cache(page));
+		putback_lru_page(page);
+	}
 
 //TRACE_TASK(current, "old page freed\n");
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 950c002bbb45..3ffde2a09765 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -662,7 +662,7 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page)))
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))
-		bad_reason = "non-NULL mapping";
+		bad_reason = "non-NULL mapping free_check";
 	if (unlikely(atomic_read(&page->_count) != 0))
 		bad_reason = "nonzero _count";
 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 24dd3f9fee27..86678671506b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1317,6 +1317,172 @@ out_mlock:
 	return ret;
 }
 
+/*
+ * @arg: enum ttu_flags will be passed to this argument
+ */
+static int try_to_unmap_one_entry(struct page *page, struct vm_area_struct *vma,
+		     unsigned long address, void *arg)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pte_t *pte;
+	pte_t pteval;
+	spinlock_t *ptl;
+	int ret = SWAP_AGAIN;
+	enum ttu_flags flags = (enum ttu_flags)arg;
+
+	pte = page_check_address(page, mm, address, &ptl, 0);
+	if (!pte)
+		goto out;
+
+	/*
+	 * If the page is mlock()d, we cannot swap it out.
+	 * If it's recently referenced (perhaps page_referenced
+	 * skipped over this mm) then we should reactivate it.
+	 */
+	if (!(flags & TTU_IGNORE_MLOCK)) {
+		if (vma->vm_flags & VM_LOCKED)
+			goto out_mlock;
+
+		if (flags & TTU_MUNLOCK)
+			goto out_unmap;
+	}
+	if (!(flags & TTU_IGNORE_ACCESS)) {
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			ret = SWAP_FAIL;
+			goto out_unmap;
+		}
+	}
+
+	/* Nuke the page table entry. */
+	flush_cache_page(vma, address, page_to_pfn(page));
+	pteval = ptep_clear_flush(vma, address, pte);
+
+	/* Move the dirty bit to the physical page now the pte is gone. */
+	if (pte_dirty(pteval))
+		set_page_dirty(page);
+
+	/* Update high watermark before we lower rss */
+	update_hiwater_rss(mm);
+
+	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+		if (!PageHuge(page)) {
+			if (PageAnon(page))
+				dec_mm_counter(mm, MM_ANONPAGES);
+			else
+				dec_mm_counter(mm, MM_FILEPAGES);
+		}
+		set_pte_at(mm, address, pte,
+			   swp_entry_to_pte(make_hwpoison_entry(page)));
+	} else if (pte_unused(pteval)) {
+		/*
+		 * The guest indicated that the page content is of no
+		 * interest anymore. Simply discard the pte, vmscan
+		 * will take care of the rest.
+		 */
+		if (PageAnon(page))
+			dec_mm_counter(mm, MM_ANONPAGES);
+		else
+			dec_mm_counter(mm, MM_FILEPAGES);
+	} else if (PageAnon(page)) {
+		swp_entry_t entry = { .val = page_private(page) };
+		pte_t swp_pte;
+
+		if (PageSwapCache(page)) {
+			/*
+			 * Store the swap location in the pte.
+			 * See handle_pte_fault() ...
+			 */
+			if (swap_duplicate(entry) < 0) {
+				set_pte_at(mm, address, pte, pteval);
+				ret = SWAP_FAIL;
+				goto out_unmap;
+			}
+			if (list_empty(&mm->mmlist)) {
+				spin_lock(&mmlist_lock);
+				if (list_empty(&mm->mmlist))
+					list_add(&mm->mmlist, &init_mm.mmlist);
+				spin_unlock(&mmlist_lock);
+			}
+			dec_mm_counter(mm, MM_ANONPAGES);
+			inc_mm_counter(mm, MM_SWAPENTS);
+		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
+			/*
+			 * Store the pfn of the page in a special migration
+			 * pte. do_swap_page() will wait until the migration
+			 * pte is removed and then restart fault handling.
+			 */
+			BUG_ON(!(flags & TTU_MIGRATION));
+			entry = make_migration_entry(page, pte_write(pteval));
+		}
+		swp_pte = swp_entry_to_pte(entry);
+		if (pte_soft_dirty(pteval))
+			swp_pte = pte_swp_mksoft_dirty(swp_pte);
+		set_pte_at(mm, address, pte, swp_pte);
+	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
+		   (flags & TTU_MIGRATION)) {
+		/* Establish migration entry for a file page */
+		swp_entry_t entry;
+		entry = make_migration_entry(page, pte_write(pteval));
+		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+printk(KERN_ERR "established migration entry for page %05lx PTE_WRITE = %d\n", page_to_pfn(page), pte_write(pteval));
+	} else
+		dec_mm_counter(mm, MM_FILEPAGES);
+
+	page_remove_rmap(page);
+	page_cache_release(page);
+
+out_unmap:
+	pte_unmap_unlock(pte, ptl);
+	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK)) {
+		mmu_notifier_invalidate_page(mm, address);
+		ret = SWAP_SUCCESS;
+	}
+out:
+	return ret;
+
+out_mlock:
+	pte_unmap_unlock(pte, ptl);
+
+
+	/*
+	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
+	 * unstable result and race. Plus, We can't wait here because
+	 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem.
+	 * if trylock failed, the page remain in evictable lru and later
+	 * vmscan could retry to move the page to unevictable lru if the
+	 * page is actually mlocked.
+	 */
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+		if (vma->vm_flags & VM_LOCKED) {
+			mlock_vma_page(page);
+			ret = SWAP_MLOCK;
+		}
+		up_read(&vma->vm_mm->mmap_sem);
+	}
+	return ret;
+}
+
+int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
+		     unsigned long address, void *arg)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct mm_struct *current_mm;
+	//int ret = SWAP_AGAIN;
+
+	rcu_read_lock();
+	get_task_struct(current);
+	rcu_read_unlock();
+	current_mm = get_task_mm(current);
+	put_task_struct(current);
+	if (!current_mm)
+		BUG();
+
+	if (mm == current_mm) {
+		return try_to_unmap_one_entry(page, vma, address, arg);
+	}
+	return SWAP_AGAIN;
+}
+
 bool is_vma_temporary_stack(struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);