Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 73 insertions(+), 8 deletions(-)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -434,6 +434,71 @@ int page_referenced(struct page *page, int is_locked)
 	return referenced;
 }
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address;
+	pte_t *pte, entry;
+	spinlock_t *ptl;
+	int ret = 0;
+
+	address = vma_address(page, vma);
+	if (address == -EFAULT)
+		goto out;
+
+	pte = page_check_address(page, mm, address, &ptl);
+	if (!pte)
+		goto out;
+
+	if (!pte_dirty(*pte) && !pte_write(*pte))
+		goto unlock;
+
+	entry = ptep_get_and_clear(mm, address, pte);
+	entry = pte_mkclean(entry);
+	entry = pte_wrprotect(entry);
+	ptep_establish(vma, address, pte, entry);
+	lazy_mmu_prot_update(entry);
+	ret = 1;
+
+unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
+	return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	int ret = 0;
+
+	BUG_ON(PageAnon(page));
+
+	spin_lock(&mapping->i_mmap_lock);
+	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+		if (vma->vm_flags & VM_SHARED)
+			ret += page_mkclean_one(page, vma);
+	}
+	spin_unlock(&mapping->i_mmap_lock);
+	return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+	int ret = 0;
+
+	BUG_ON(!PageLocked(page));
+
+	if (page_mapped(page)) {
+		struct address_space *mapping = page_mapping(page);
+		if (mapping)
+			ret = page_mkclean_file(mapping, page);
+	}
+
+	return ret;
+}
+
 /**
  * page_set_anon_rmap - setup new anonymous rmap
  * @page:	the page to add the mapping to
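
The hunk above introduces the page_mkclean() family: page_mkclean_one() cleans and write-protects a single pte under the pte lock, page_mkclean_file() applies it to every VM_SHARED vma mapping the page via the i_mmap prio tree, and page_mkclean() is the entry point, which requires the page lock. Below is a minimal sketch of how a writeback-side caller might use it; the helper is hypothetical and not part of the patch, and only page_mkclean(), set_page_dirty() and lock_page()/unlock_page() are existing kernel interfaces:

/*
 * Editor's sketch (hypothetical helper, not in the patch): clean the
 * ptes of a shared file page before starting writeback.
 */
static void example_prepare_writeback(struct page *page)
{
	lock_page(page);	/* page_mkclean() asserts PageLocked */
	/*
	 * Write-protect and clean every pte mapping the page.  If any
	 * pte was dirty or writable, carry that state over to the
	 * struct page so writeback sees it; the next store through a
	 * pte will fault and re-dirty the page.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
}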
@@ -455,7 +520,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_page_state(nr_mapped);
+	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -499,7 +564,7 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount))
-		__inc_page_state(nr_mapped);
+		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -531,7 +596,8 @@ void page_remove_rmap(struct page *page)
 	 */
 	if (page_test_and_clear_dirty(page))
 		set_page_dirty(page);
-	__dec_page_state(nr_mapped);
+	__dec_zone_page_state(page,
+			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 	}
 }
 
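
The three hunks above replace the single global nr_mapped statistic with per-zone counters, accounting mapped anonymous pages (NR_ANON_PAGES) and mapped file pages (NR_FILE_MAPPED) separately. A sketch of reading the split counters, assuming the global_page_state() accessor from the zoned-VM-counter infrastructure this builds on:

	/*
	 * Editor's sketch: the old nr_mapped figure is now the sum of
	 * two vm_stat items.
	 */
	unsigned long nr_mapped = global_page_state(NR_FILE_MAPPED) +
				  global_page_state(NR_ANON_PAGES);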
@@ -562,9 +628,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)
-				&& !migration)) {
+	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+			(ptep_clear_flush_young(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
@@ -771,7 +836,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & VM_LOCKED)
+		if ((vma->vm_flags & VM_LOCKED) && !migration)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -805,7 +870,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & VM_LOCKED)
+			if ((vma->vm_flags & VM_LOCKED) && !migration)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
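
The last three hunks make the VM_LOCKED and referenced-pte checks apply only when migration == 0: reclaim must still skip mlocked and recently used pages, but page migration has to unmap the page unconditionally. A sketch of the callers' intent, assuming the two-argument try_to_unmap() of this kernel:

	/* Editor's sketch: the second argument selects the behaviour. */
	ret = try_to_unmap(page, 0);	/* reclaim: honour VM_LOCKED/young */
	ret = try_to_unmap(page, 1);	/* migration: unmap regardless */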
