Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 60 ++++++++++++++++++++++++++----------------------------------
 1 file changed, 26 insertions(+), 34 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -47,9 +47,9 @@
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
-#include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
 
 #include <asm/tlbflush.h>
 
@@ -191,7 +191,7 @@ void __init anon_vma_init(void)
  * Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma rely on RCU to guard against the races.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+static struct anon_vma *page_lock_anon_vma(struct page *page)
 {
 	struct anon_vma *anon_vma;
 	unsigned long anon_mapping;
@@ -211,7 +211,7 @@ out:
 	return NULL;
 }
 
-void page_unlock_anon_vma(struct anon_vma *anon_vma)
+static void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
 	spin_unlock(&anon_vma->lock);
 	rcu_read_unlock();
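
For context, the two helpers made static above are the locking pattern the comment in the previous hunk describes: take rcu_read_lock() first, so the anon_vma cannot be freed under us, then take its spinlock. A minimal sketch of the lookup, reconstructed from the fragments visible in this diff (the exact body in this tree may differ):

	/* Sketch: RCU keeps the anon_vma memory valid until its lock is held. */
	static struct anon_vma *page_lock_anon_vma(struct page *page)
	{
		struct anon_vma *anon_vma;
		unsigned long anon_mapping;

		rcu_read_lock();
		anon_mapping = (unsigned long)page->mapping;
		if (!(anon_mapping & PAGE_MAPPING_ANON))
			goto out;			/* not an anonymous page */
		if (!page_mapped(page))
			goto out;			/* all ptes already gone */

		anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
		spin_lock(&anon_vma->lock);
		return anon_vma;
	out:
		rcu_read_unlock();
		return NULL;
	}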
@@ -359,8 +359,17 @@ static int page_referenced_one(struct page *page,
 		goto out_unmap;
 	}
 
-	if (ptep_clear_flush_young_notify(vma, address, pte))
-		referenced++;
+	if (ptep_clear_flush_young_notify(vma, address, pte)) {
+		/*
+		 * Don't treat a reference through a sequentially read
+		 * mapping as such.  If the page has been used in
+		 * another mapping, we will catch it; if this other
+		 * mapping is already gone, the unmap path will have
+		 * set PG_referenced or activated the page.
+		 */
+		if (likely(!VM_SequentialReadHint(vma)))
+			referenced++;
+	}
 
 	/* Pretend the page is referenced if the task has the
 	   swap token and is in the middle of a page fault. */
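
The new test above stops counting young-pte references made through a sequentially-read mapping, so streaming I/O no longer inflates the page's apparent activity. VM_SequentialReadHint() checks the per-VMA hint that userspace sets with madvise(2); a minimal userspace sketch (the function name and arguments here are illustrative):

	#include <stddef.h>
	#include <sys/mman.h>

	/*
	 * Mark a mapping as streaming input: pages touched through it
	 * will no longer look "referenced" to reclaim, so one large
	 * sequential read cannot push the real working set out.
	 */
	static void hint_sequential(void *buf, size_t len)
	{
		madvise(buf, len, MADV_SEQUENTIAL);
	}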
@@ -661,9 +670,14 @@ void page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	SetPageSwapBacked(page);
+	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
+	else
+		add_page_to_unevictable_list(page);
 }
 
 /**
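
After this hunk, page_add_new_anon_rmap() handles LRU placement itself: evictable pages go straight onto the active-anon list, unevictable ones (e.g. pages in mlocked VMAs) onto the unevictable list. A hedged sketch of how a fault path would then use it, with the surrounding locking and error handling elided:

	/* under the pte lock, after allocating and charging the new page */
	inc_mm_counter(mm, anon_rss);
	set_pte_at(mm, address, page_table, entry);
	page_add_new_anon_rmap(page, vma, address);	/* also adds page to the LRU */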
@@ -693,7 +707,6 @@ void page_add_file_rmap(struct page *page)
  */
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
-	BUG_ON(page_mapcount(page) == 0);
 	if (PageAnon(page))
 		__page_check_anon_rmap(page, vma, address);
 	atomic_inc(&page->_mapcount);
@@ -703,28 +716,12 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
- * @vma: the vm area in which the mapping is removed
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
+void page_remove_rmap(struct page *page)
 {
 	if (atomic_add_negative(-1, &page->_mapcount)) {
-		if (unlikely(page_mapcount(page) < 0)) {
-			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
-			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
-			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
-			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
-			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
-			print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
-			if (vma->vm_ops) {
-				print_symbol (KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
-			}
-			if (vma->vm_file && vma->vm_file->f_op)
-				print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
-			BUG();
-		}
-
 		/*
 		 * Now that the last pte has gone, s390 must transfer dirty
 		 * flag from storage key to struct page.  We can usually skip
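
Dropping the "Eeek" diagnostic block also removes the only user of @vma, which is why the parameter can go away. What remains relies entirely on the biased _mapcount convention, summarized here (a sketch of the convention, not code from this tree):

	/*
	 * _mapcount starts at -1, meaning "no pte maps this page".
	 * The first mapping takes it to 0 (e.g. the atomic_set() in
	 * page_add_new_anon_rmap() above), each further mapping
	 * atomic_inc()s it, and the atomic_add_negative(-1, ...) in
	 * page_remove_rmap() returns true exactly when the count drops
	 * back to -1, that is, when the last mapping just went away,
	 * so the teardown work runs once per page, not once per pte.
	 */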
@@ -818,8 +815,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				spin_unlock(&mmlist_lock);
 			}
 			dec_mm_counter(mm, anon_rss);
-#ifdef CONFIG_MIGRATION
-		} else {
+		} else if (PAGE_MIGRATION) {
 			/*
 			 * Store the pfn of the page in a special migration
 			 * pte. do_swap_page() will wait until the migration
@@ -827,23 +823,19 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 */
 			BUG_ON(!migration);
 			entry = make_migration_entry(page, pte_write(pteval));
-#endif
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else
-#ifdef CONFIG_MIGRATION
-	if (migration) {
+	} else if (PAGE_MIGRATION && migration) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 	} else
-#endif
 		dec_mm_counter(mm, file_rss);
 
 
-	page_remove_rmap(page, vma);
+	page_remove_rmap(page);
 	page_cache_release(page);
 
 out_unmap:
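
The #ifdef CONFIG_MIGRATION blocks become ordinary C conditionals on PAGE_MIGRATION, which presumably comes from the newly included <linux/migrate.h> along these lines (this exact definition is an assumption, it is not shown in this diff):

	/* assumed shape of the definition in <linux/migrate.h> */
	#ifdef CONFIG_MIGRATION
	#define PAGE_MIGRATION 1
	#else
	#define PAGE_MIGRATION 0
	#endif

Because PAGE_MIGRATION is a compile-time constant, a branch like "} else if (PAGE_MIGRATION && migration) {" is still parsed and type-checked in every configuration, yet the compiler discards it entirely when migration is disabled, something the old #ifdef version achieved only by hiding the code from the compiler.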
@@ -958,7 +950,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		if (pte_dirty(pteval))
 			set_page_dirty(page);
 
-		page_remove_rmap(page, vma);
+		page_remove_rmap(page);
 		page_cache_release(page);
 		dec_mm_counter(mm, file_rss);
 		(*mapcount)--;