path: root/mm/memory.c
author     Anton Altaparmakov <aia21@cantab.net>  2005-06-23 06:26:22 -0400
committer  Anton Altaparmakov <aia21@cantab.net>  2005-06-23 06:26:22 -0400
commit     3357d4c75f1fb67e7304998c4ad4e9a9fed66fa4 (patch)
tree       ceba46966a5a1112a05d257d8ecb25ae5eee95e0  /mm/memory.c
parent     364f6c717deef4a3ac4982e670fa9846b43cd060 (diff)
parent     ee98689be1b054897ff17655008c3048fe88be94 (diff)
Automatic merge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  57
1 file changed, 16 insertions(+), 41 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d209f745db7f..da91b7bf9986 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -840,23 +840,8 @@ check_user_page_readable(struct mm_struct *mm, unsigned long address)
 {
 	return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
 }
-
 EXPORT_SYMBOL(check_user_page_readable);
 
-/*
- * Given a physical address, is there a useful struct page pointing to
- * it? This may become more complex in the future if we start dealing
- * with IO-aperture pages for direct-IO.
- */
-
-static inline struct page *get_page_map(struct page *page)
-{
-	if (!pfn_valid(page_to_pfn(page)))
-		return NULL;
-	return page;
-}
-
-
 static inline int
 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 			 unsigned long address)
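
The helper removed above existed to filter out follow_page() results with no valid struct page behind them. After this merge, follow_page() only ever returns pfn_valid pages, so the filter is dead code. A minimal sketch of the invariant the cleanup relies on (my paraphrase, not code from this commit; assumes the 2.6.12-era follow_page(mm, address, write) signature):

#include <linux/mm.h>

static void check_follow_page_invariant(struct mm_struct *mm,
					unsigned long address, int write)
{
	/* Caller must hold mm->page_table_lock, as get_user_pages() does. */
	struct page *page = follow_page(mm, address, write);

	/* The condition get_page_map() used to test now always holds. */
	if (page)
		BUG_ON(!pfn_valid(page_to_pfn(page)));
}
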
@@ -887,7 +872,6 @@ untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 	return 0;
 }
 
-
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int len, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
@@ -951,21 +935,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		}
 		spin_lock(&mm->page_table_lock);
 		do {
-			struct page *map;
+			struct page *page;
 			int lookup_write = write;
 
 			cond_resched_lock(&mm->page_table_lock);
-			while (!(map = follow_page(mm, start, lookup_write))) {
+			while (!(page = follow_page(mm, start, lookup_write))) {
 				/*
 				 * Shortcut for anonymous pages. We don't want
 				 * to force the creation of pages tables for
-				 * insanly big anonymously mapped areas that
+				 * insanely big anonymously mapped areas that
 				 * nobody touched so far. This is important
 				 * for doing a core dump for these mappings.
 				 */
 				if (!lookup_write &&
 				    untouched_anonymous_page(mm,vma,start)) {
-					map = ZERO_PAGE(start);
+					page = ZERO_PAGE(start);
 					break;
 				}
 				spin_unlock(&mm->page_table_lock);
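
The comment above is the heart of the shortcut: a read-only lookup of an anonymous area that has never been written can be satisfied with ZERO_PAGE instead of instantiating page tables for the whole range, which matters when core-dumping huge untouched mappings. A userspace illustration of why this is safe (mine, not from the patch):

#include <assert.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 30;		/* 1 GiB, never written to */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(p != MAP_FAILED);
	/* Every untouched byte is backed by the shared zero page. */
	assert(p[0] == 0 && p[len - 1] == 0);
	munmap(p, len);
	return 0;
}
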
@@ -994,30 +978,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				spin_lock(&mm->page_table_lock);
 			}
 			if (pages) {
-				pages[i] = get_page_map(map);
-				if (!pages[i]) {
-					spin_unlock(&mm->page_table_lock);
-					while (i--)
-						page_cache_release(pages[i]);
-					i = -EFAULT;
-					goto out;
-				}
-				flush_dcache_page(pages[i]);
-				if (!PageReserved(pages[i]))
-					page_cache_get(pages[i]);
+				pages[i] = page;
+				flush_dcache_page(page);
+				if (!PageReserved(page))
+					page_cache_get(page);
 			}
 			if (vmas)
 				vmas[i] = vma;
 			i++;
 			start += PAGE_SIZE;
 			len--;
-		} while(len && start < vma->vm_end);
+		} while (len && start < vma->vm_end);
 		spin_unlock(&mm->page_table_lock);
-	} while(len);
-out:
+	} while (len);
 	return i;
 }
 
 EXPORT_SYMBOL(get_user_pages);
 
 static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
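
With the error path gone, get_user_pages() can no longer bail out halfway with -EFAULT from a failed get_page_map(); every page it stores in pages[] is valid and referenced. A hedged usage sketch for this era's API (the helper name and its shape are illustrative, not from this commit):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

static int pin_one_user_page(unsigned long uaddr, int write,
			     struct page **pagep)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);	/* required around get_user_pages() */
	ret = get_user_pages(current, mm, uaddr & PAGE_MASK, 1,
			     write, 0 /* force */, pagep, NULL);
	up_read(&mm->mmap_sem);

	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;
	return 0;	/* caller drops the ref with page_cache_release() */
}
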
@@ -1264,7 +1239,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 	}
 	old_page = pfn_to_page(pfn);
 
-	if (!TestSetPageLocked(old_page)) {
+	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
 		int reuse = can_share_swap_page(old_page);
 		unlock_page(old_page);
 		if (reuse) {
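
The new PageAnon() guard narrows the write-fault fast path: only anonymous pages can be judged "exclusively ours" via can_share_swap_page(), so file-backed pages now always take the copy path. Restated as a standalone predicate (my condensation of the logic above; no such function exists in the tree):

#include <linux/mm.h>
#include <linux/swap.h>

static int wp_can_reuse(struct page *old_page)
{
	int reuse = 0;

	/* Only anon pages take part in the swap-sharing check, and we
	 * must not sleep here: trylock via TestSetPageLocked(). */
	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
		reuse = can_share_swap_page(old_page);
		unlock_page(old_page);
	}
	return reuse;	/* 0: copy on write; 1: reuse the page in place */
}
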
@@ -1711,10 +1686,6 @@ static int do_swap_page(struct mm_struct * mm,
 	}
 
 	/* The page isn't present yet, go ahead with the fault. */
-
-	swap_free(entry);
-	if (vm_swap_full())
-		remove_exclusive_swap_page(page);
 
 	inc_mm_counter(mm, rss);
 	pte = mk_pte(page, vma->vm_page_prot);
@@ -1722,12 +1693,16 @@ static int do_swap_page(struct mm_struct * mm,
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		write_access = 0;
 	}
-	unlock_page(page);
 
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
 	page_add_anon_rmap(page, vma, address);
 
+	swap_free(entry);
+	if (vm_swap_full())
+		remove_exclusive_swap_page(page);
+	unlock_page(page);
+
 	if (write_access) {
 		if (do_wp_page(mm, vma, address,
 				page_table, pmd, pte) == VM_FAULT_OOM)
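
Taken together, the two do_swap_page() hunks move swap_free(), the remove_exclusive_swap_page() call, and unlock_page() from before the pte is installed to after page_add_anon_rmap(), so the page stays locked and the swap entry stays referenced until the new anonymous rmap is visible. The resulting tail of the function, restated with the ordering annotated (identifiers as in the patch; the comments are mine):

	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);	/* page is still locked */

	swap_free(entry);		/* swap ref dropped only after the
					   rmap is in place */
	if (vm_swap_full())
		remove_exclusive_swap_page(page);
	unlock_page(page);		/* unlock comes last */
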