about summary refs log tree commit diff stats
path: root/mm/rmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  29
1 files changed, 11 insertions, 18 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 08ac5c7fa91f..450f5241b5a5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -34,9 +34,8 @@
34 * anon_vma->lock 34 * anon_vma->lock
35 * mm->page_table_lock 35 * mm->page_table_lock
36 * zone->lru_lock (in mark_page_accessed) 36 * zone->lru_lock (in mark_page_accessed)
37 * swap_list_lock (in swap_free etc's swap_info_get) 37 * swap_lock (in swap_duplicate, swap_info_get)
38 * mmlist_lock (in mmput, drain_mmlist and others) 38 * mmlist_lock (in mmput, drain_mmlist and others)
39 * swap_device_lock (in swap_duplicate, swap_info_get)
40 * mapping->private_lock (in __set_page_dirty_buffers) 39 * mapping->private_lock (in __set_page_dirty_buffers)
41 * inode_lock (in set_page_dirty's __mark_inode_dirty) 40 * inode_lock (in set_page_dirty's __mark_inode_dirty)
42 * sb_lock (within inode_lock in fs/fs-writeback.c) 41 * sb_lock (within inode_lock in fs/fs-writeback.c)
@@ -290,8 +289,6 @@ static int page_referenced_one(struct page *page,
290 pte_t *pte; 289 pte_t *pte;
291 int referenced = 0; 290 int referenced = 0;
292 291
293 if (!get_mm_counter(mm, rss))
294 goto out;
295 address = vma_address(page, vma); 292 address = vma_address(page, vma);
296 if (address == -EFAULT) 293 if (address == -EFAULT)
297 goto out; 294 goto out;
@@ -442,22 +439,19 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
442void page_add_anon_rmap(struct page *page, 439void page_add_anon_rmap(struct page *page,
443 struct vm_area_struct *vma, unsigned long address) 440 struct vm_area_struct *vma, unsigned long address)
444{ 441{
445 struct anon_vma *anon_vma = vma->anon_vma;
446 pgoff_t index;
447
448 BUG_ON(PageReserved(page)); 442 BUG_ON(PageReserved(page));
449 BUG_ON(!anon_vma);
450 443
451 inc_mm_counter(vma->vm_mm, anon_rss); 444 inc_mm_counter(vma->vm_mm, anon_rss);
452 445
453 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
454 index = (address - vma->vm_start) >> PAGE_SHIFT;
455 index += vma->vm_pgoff;
456 index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
457
458 if (atomic_inc_and_test(&page->_mapcount)) { 446 if (atomic_inc_and_test(&page->_mapcount)) {
459 page->index = index; 447 struct anon_vma *anon_vma = vma->anon_vma;
448
449 BUG_ON(!anon_vma);
450 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
460 page->mapping = (struct address_space *) anon_vma; 451 page->mapping = (struct address_space *) anon_vma;
452
453 page->index = linear_page_index(vma, address);
454
461 inc_page_state(nr_mapped); 455 inc_page_state(nr_mapped);
462 } 456 }
463 /* else checking page index and mapping is racy */ 457 /* else checking page index and mapping is racy */
@@ -518,8 +512,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
518 pte_t pteval; 512 pte_t pteval;
519 int ret = SWAP_AGAIN; 513 int ret = SWAP_AGAIN;
520 514
521 if (!get_mm_counter(mm, rss))
522 goto out;
523 address = vma_address(page, vma); 515 address = vma_address(page, vma);
524 if (address == -EFAULT) 516 if (address == -EFAULT)
525 goto out; 517 goto out;
@@ -532,6 +524,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
532 * If the page is mlock()d, we cannot swap it out. 524 * If the page is mlock()d, we cannot swap it out.
533 * If it's recently referenced (perhaps page_referenced 525 * If it's recently referenced (perhaps page_referenced
534 * skipped over this mm) then we should reactivate it. 526 * skipped over this mm) then we should reactivate it.
527 *
528 * Pages belonging to VM_RESERVED regions should not happen here.
535 */ 529 */
536 if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) || 530 if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
537 ptep_clear_flush_young(vma, address, pte)) { 531 ptep_clear_flush_young(vma, address, pte)) {
@@ -767,8 +761,7 @@ static int try_to_unmap_file(struct page *page)
767 if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) 761 if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
768 continue; 762 continue;
769 cursor = (unsigned long) vma->vm_private_data; 763 cursor = (unsigned long) vma->vm_private_data;
770 while (get_mm_counter(vma->vm_mm, rss) && 764 while ( cursor < max_nl_cursor &&
771 cursor < max_nl_cursor &&
772 cursor < vma->vm_end - vma->vm_start) { 765 cursor < vma->vm_end - vma->vm_start) {
773 try_to_unmap_cluster(cursor, &mapcount, vma); 766 try_to_unmap_cluster(cursor, &mapcount, vma);
774 cursor += CLUSTER_SIZE; 767 cursor += CLUSTER_SIZE;