author     Hugh Dickins <hugh@veritas.com>              2005-06-21 20:15:10 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-21 21:46:21 -0400
commit     08ef472937e918875a82fd350d3de138aac50414 (patch)
tree       990a503a711b82f94ad78ea5794a46992db05228 /mm
parent     7c2f3fda5666c280bcd00ac3b86963270b23e796 (diff)
[PATCH] get_user_pages: kill get_page_map
Since its birth, get_user_pages has been calling a misguided get_page_map
function. follow_page has already returned NULL if the pfn is invalid; we
cannot reach an invalid pfn from a validated struct page.
Remove get_page_map, and the messy rewind in get_user_pages to cope with
its failure. Oh, and could we please call that "struct page *page" like
everywhere else, instead of "struct page *map"?
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--    mm/memory.c    45
1 file changed, 10 insertions(+), 35 deletions(-)
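In short, the helper removed here only re-checked what follow_page() already guarantees, and its NULL return forced the messy rewind mentioned above. A condensed before/after sketch, distilled from the hunks that follow (not additional code beyond what the patch touches):

	/* Before: every page from follow_page() was re-validated, and a NULL
	 * from get_page_map() made get_user_pages() unwind and return -EFAULT. */
	static inline struct page *get_page_map(struct page *page)
	{
		if (!pfn_valid(page_to_pfn(page)))	/* cannot trigger: follow_page()
							 * already returned NULL for an
							 * invalid pfn */
			return NULL;
		return page;
	}

	/* After: the caller takes the page from follow_page() directly. */
	pages[i] = page;
	flush_dcache_page(page);
	if (!PageReserved(page))
		page_cache_get(page);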
diff --git a/mm/memory.c b/mm/memory.c
index d209f745db7f..b8846cf2358a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -840,23 +840,8 @@ check_user_page_readable(struct mm_struct *mm, unsigned long address)
 {
 	return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
 }
-
 EXPORT_SYMBOL(check_user_page_readable);
 
-/*
- * Given a physical address, is there a useful struct page pointing to
- * it? This may become more complex in the future if we start dealing
- * with IO-aperture pages for direct-IO.
- */
-
-static inline struct page *get_page_map(struct page *page)
-{
-	if (!pfn_valid(page_to_pfn(page)))
-		return NULL;
-	return page;
-}
-
-
 static inline int
 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 			 unsigned long address)
@@ -887,7 +872,6 @@ untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 	return 0;
 }
 
-
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int len, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
@@ -951,21 +935,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		}
 		spin_lock(&mm->page_table_lock);
 		do {
-			struct page *map;
+			struct page *page;
 			int lookup_write = write;
 
 			cond_resched_lock(&mm->page_table_lock);
-			while (!(map = follow_page(mm, start, lookup_write))) {
+			while (!(page = follow_page(mm, start, lookup_write))) {
 				/*
 				 * Shortcut for anonymous pages. We don't want
 				 * to force the creation of pages tables for
-				 * insanly big anonymously mapped areas that
+				 * insanely big anonymously mapped areas that
 				 * nobody touched so far. This is important
 				 * for doing a core dump for these mappings.
 				 */
 				if (!lookup_write &&
 				    untouched_anonymous_page(mm,vma,start)) {
-					map = ZERO_PAGE(start);
+					page = ZERO_PAGE(start);
 					break;
 				}
 				spin_unlock(&mm->page_table_lock);
@@ -994,30 +978,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				spin_lock(&mm->page_table_lock);
 			}
 			if (pages) {
-				pages[i] = get_page_map(map);
-				if (!pages[i]) {
-					spin_unlock(&mm->page_table_lock);
-					while (i--)
-						page_cache_release(pages[i]);
-					i = -EFAULT;
-					goto out;
-				}
-				flush_dcache_page(pages[i]);
-				if (!PageReserved(pages[i]))
-					page_cache_get(pages[i]);
+				pages[i] = page;
+				flush_dcache_page(page);
+				if (!PageReserved(page))
+					page_cache_get(page);
 			}
 			if (vmas)
 				vmas[i] = vma;
 			i++;
 			start += PAGE_SIZE;
 			len--;
-		} while(len && start < vma->vm_end);
+		} while (len && start < vma->vm_end);
 		spin_unlock(&mm->page_table_lock);
-	} while(len);
-out:
+	} while (len);
 	return i;
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
 static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,