Diffstat (limited to 'mm/rmap.c'):

 mm/rmap.c | 65 ++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 38 insertions(+), 27 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index ebee81688736..869aaa3206a2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
 #include <linux/swapops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
@@ -336,9 +337,9 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long address, unsigned int *mapcount,
 			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
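
With `static` dropped here (and on try_to_unmap_one further down), mm/ksm.c can call these per-vma workers directly when it walks the rmap of a merged page. A minimal sketch of the prototypes this implies; that they live in a shared header such as include/linux/rmap.h is an assumption:

	/* Hedged sketch: prototypes the un-static'ed helpers would need in a
	 * shared header (the exact header location is an assumption here). */
	int page_referenced_one(struct page *page, struct vm_area_struct *vma,
				unsigned long address, unsigned int *mapcount,
				unsigned long *vm_flags);
	int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, enum ttu_flags flags);
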
@@ -507,28 +508,33 @@ int page_referenced(struct page *page,
 		    unsigned long *vm_flags)
 {
 	int referenced = 0;
+	int we_locked = 0;
 
 	if (TestClearPageReferenced(page))
 		referenced++;
 
 	*vm_flags = 0;
 	if (page_mapped(page) && page_rmapping(page)) {
-		if (PageAnon(page))
+		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+			we_locked = trylock_page(page);
+			if (!we_locked) {
+				referenced++;
+				goto out;
+			}
+		}
+		if (unlikely(PageKsm(page)))
+			referenced += page_referenced_ksm(page, mem_cont,
+								vm_flags);
+		else if (PageAnon(page))
 			referenced += page_referenced_anon(page, mem_cont,
 								vm_flags);
-		else if (is_locked)
+		else if (page->mapping)
 			referenced += page_referenced_file(page, mem_cont,
 								vm_flags);
-		else if (!trylock_page(page))
-			referenced++;
-		else {
-			if (page->mapping)
-				referenced += page_referenced_file(page,
-							mem_cont, vm_flags);
+		if (we_locked)
 			unlock_page(page);
-		}
 	}
-
+out:
 	if (page_test_and_clear_young(page))
 		referenced++;
 
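
The restructured page_referenced() above folds the old file-only fallback into one up-front rule: if the caller has not locked the page and the page is file-backed or KSM, trylock_page() it; on failure, count the page as referenced and bail out rather than block, since contention is itself a sign of activity (page->mapping is then rechecked under the lock, as truncation may have cleared it). A minimal userspace sketch of that trylock-or-bail pattern, assuming pthreads; count_references() and page_lock are illustrative names, not kernel API:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Trylock-or-bail, as in the new page_referenced(): if the caller
	 * does not already hold the lock, try to take it; on contention,
	 * report the page as referenced instead of blocking. */
	static int count_references(int caller_holds_lock)
	{
		int referenced = 0;
		int we_locked = 0;

		if (!caller_holds_lock) {
			we_locked = (pthread_mutex_trylock(&page_lock) == 0);
			if (!we_locked)
				return 1;	/* contended: treat as recently used */
		}
		/* ... walk the rmap and accumulate `referenced` here ... */
		if (we_locked)
			pthread_mutex_unlock(&page_lock);
		return referenced;
	}

	int main(void)
	{
		printf("referenced = %d\n", count_references(0));
		return 0;
	}
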
@@ -620,14 +626,7 @@ static void __page_set_anon_rmap(struct page *page,
 	BUG_ON(!anon_vma);
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
-
 	page->index = linear_page_index(vma, address);
-
-	/*
-	 * nr_mapped state can be updated without turning off
-	 * interrupts because it is not modified via interrupt.
-	 */
-	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -665,14 +664,21 @@ static void __page_check_anon_rmap(struct page *page,
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock and the page must be locked.
+ * The caller needs to hold the pte lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	int first = atomic_inc_and_test(&page->_mapcount);
+	if (first)
+		__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (unlikely(PageKsm(page)))
+		return;
+
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-	if (atomic_inc_and_test(&page->_mapcount))
+	if (first)
 		__page_set_anon_rmap(page, vma, address);
 	else
 		__page_check_anon_rmap(page, vma, address);
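
Note the reordering above: the _mapcount increment now happens before the PageLocked assertion, so a KSM page gets its NR_ANON_PAGES accounting and then returns early, leaving its page->mapping (which encodes KSM state) untouched by __page_set_anon_rmap(). That is also why the statistics update moved out of that helper and into the callers, with page_add_new_anon_rmap() in the next hunk compensating at its atomic_set() site. A small userspace sketch of the count-from-minus-one trick, using C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	/* The _mapcount convention: the counter starts at -1 for an unmapped
	 * page, so the increment that lands on 0 (what the kernel's
	 * atomic_inc_and_test() reports) marks the first mapper, and only
	 * that mapper bumps the anon-page statistic. Userspace sketch only. */
	int main(void)
	{
		atomic_int mapcount = -1;	/* page starts with no mappings */
		int nr_anon_pages = 0;

		for (int i = 0; i < 3; i++) {
			/* fetch_add returns the old value; old == -1 means first */
			int first = (atomic_fetch_add(&mapcount, 1) == -1);
			if (first)
				nr_anon_pages++;
			printf("mapping %d: first=%d\n", i, first);
		}
		printf("nr_anon_pages = %d\n", nr_anon_pages);
		return 0;
	}
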
@@ -694,6 +700,7 @@ void page_add_new_anon_rmap(struct page *page,
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
+	__inc_zone_page_state(page, NR_ANON_PAGES);
 	__page_set_anon_rmap(page, vma, address);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -760,8 +767,8 @@ void page_remove_rmap(struct page *page)
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long address, enum ttu_flags flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
@@ -1156,7 +1163,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 
 	BUG_ON(!PageLocked(page));
 
-	if (PageAnon(page))
+	if (unlikely(PageKsm(page)))
+		ret = try_to_unmap_ksm(page, flags);
+	else if (PageAnon(page))
 		ret = try_to_unmap_anon(page, flags);
 	else
 		ret = try_to_unmap_file(page, flags);
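
try_to_unmap() now dispatches three ways instead of two, and order matters: a KSM page also passes the PageAnon() test, so the PageKsm() check must come first. A compilable toy version of the same dispatch shape; the stub names are hypothetical, not kernel functions:

	#include <stdio.h>

	/* Stand-ins for the three unmap back-ends; hypothetical stubs. */
	static int unmap_ksm_stub(void)  { return 0; }
	static int unmap_anon_stub(void) { return 0; }
	static int unmap_file_stub(void) { return 0; }

	enum page_kind { PAGE_KSM, PAGE_ANON, PAGE_FILE };

	/* The dispatch above in miniature: KSM is tested before anon,
	 * because a KSM page would also satisfy the anon test. */
	static int unmap_dispatch(enum page_kind kind)
	{
		switch (kind) {
		case PAGE_KSM:	return unmap_ksm_stub();
		case PAGE_ANON:	return unmap_anon_stub();
		default:	return unmap_file_stub();
		}
	}

	int main(void)
	{
		printf("ret = %d\n", unmap_dispatch(PAGE_KSM));
		return 0;
	}
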
@@ -1177,15 +1186,17 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  *
  * SWAP_AGAIN	- no vma is holding page mlocked, or,
  * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
+ * SWAP_FAIL	- page cannot be located at present
  * SWAP_MLOCK	- page is now mlocked.
  */
 int try_to_munlock(struct page *page)
 {
 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
-	if (PageAnon(page))
+	if (unlikely(PageKsm(page)))
+		return try_to_unmap_ksm(page, TTU_MUNLOCK);
+	else if (PageAnon(page))
 		return try_to_unmap_anon(page, TTU_MUNLOCK);
 	else
 		return try_to_unmap_file(page, TTU_MUNLOCK);
 }
-
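
With KSM pages in the mix, try_to_munlock() can now also return SWAP_FAIL, newly documented above, when the page cannot be located at present. A small sketch of how a caller on the munlock path might distinguish the outcomes; the enum values mirror include/linux/rmap.h by assumption, and the helper is hypothetical, not kernel API:

	#include <stdio.h>

	/* Mirrors of the kernel's SWAP_* return codes (values assumed to
	 * follow include/linux/rmap.h). */
	enum { SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL, SWAP_MLOCK };

	/* Hypothetical helper mapping each documented outcome to an action. */
	static const char *munlock_outcome(int ret)
	{
		switch (ret) {
		case SWAP_AGAIN: return "no vma holds it mlocked: safe to clear";
		case SWAP_FAIL:  return "page cannot be located at present";
		case SWAP_MLOCK: return "page is mapped in an mlocked vma";
		default:	 return "unexpected for TTU_MUNLOCK";
		}
	}

	int main(void)
	{
		printf("%s\n", munlock_outcome(SWAP_FAIL));
		return 0;
	}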