author     Jeff Garzik <jgarzik@pobox.com>  2005-08-14 23:10:00 -0400
committer  Jeff Garzik <jgarzik@pobox.com>  2005-08-14 23:10:00 -0400
commit     4c0e176dd5e4c44dd60f398518f75eedbe1a65f3
tree       07aea7539f78f221c6fc535a94a07befa2afdb63  /mm/memory.c
parent     f241be74b803dcf9d70c9978292946370654320f
parent     2ba84684e8cf6f980e4e95a2300f53a505eb794e
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'mm/memory.c')
 -rw-r--r--  mm/memory.c  48
 1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6fe77acbc1cd..e046b7e4b530 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -913,9 +913,13 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		pud = pud_offset(pgd, pg);
 		BUG_ON(pud_none(*pud));
 		pmd = pmd_offset(pud, pg);
-		BUG_ON(pmd_none(*pmd));
+		if (pmd_none(*pmd))
+			return i ? : -EFAULT;
 		pte = pte_offset_map(pmd, pg);
-		BUG_ON(pte_none(*pte));
+		if (pte_none(*pte)) {
+			pte_unmap(pte);
+			return i ? : -EFAULT;
+		}
 		if (pages) {
 			pages[i] = pte_page(*pte);
 			get_page(pages[i]);
@@ -940,11 +944,13 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		}
 		spin_lock(&mm->page_table_lock);
 		do {
+			int write_access = write;
 			struct page *page;
-			int lookup_write = write;
 
 			cond_resched_lock(&mm->page_table_lock);
-			while (!(page = follow_page(mm, start, lookup_write))) {
+			while (!(page = follow_page(mm, start, write_access))) {
+				int ret;
+
 				/*
 				 * Shortcut for anonymous pages. We don't want
 				 * to force the creation of pages tables for
@@ -952,13 +958,23 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				 * nobody touched so far. This is important
 				 * for doing a core dump for these mappings.
 				 */
-				if (!lookup_write &&
-				    untouched_anonymous_page(mm,vma,start)) {
+				if (!write && untouched_anonymous_page(mm,vma,start)) {
 					page = ZERO_PAGE(start);
 					break;
 				}
 				spin_unlock(&mm->page_table_lock);
-				switch (handle_mm_fault(mm,vma,start,write)) {
+				ret = __handle_mm_fault(mm, vma, start, write_access);
+
+				/*
+				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
+				 * broken COW when necessary, even if maybe_mkwrite
+				 * decided not to set pte_write. We can thus safely do
+				 * subsequent page lookups as if they were reads.
+				 */
+				if (ret & VM_FAULT_WRITE)
+					write_access = 0;
+
+				switch (ret & ~VM_FAULT_WRITE) {
 				case VM_FAULT_MINOR:
 					tsk->min_flt++;
 					break;
@@ -972,14 +988,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				default:
 					BUG();
 				}
-				/*
-				 * Now that we have performed a write fault
-				 * and surely no longer have a shared page we
-				 * shouldn't write, we shouldn't ignore an
-				 * unwritable page in the page table if
-				 * we are forcing write access.
-				 */
-				lookup_write = write && !force;
 				spin_lock(&mm->page_table_lock);
 			}
 			if (pages) {
@@ -1229,6 +1237,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 	struct page *old_page, *new_page;
 	unsigned long pfn = pte_pfn(pte);
 	pte_t entry;
+	int ret;
 
 	if (unlikely(!pfn_valid(pfn))) {
 		/*
@@ -1256,7 +1265,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 			lazy_mmu_prot_update(entry);
 			pte_unmap(page_table);
 			spin_unlock(&mm->page_table_lock);
-			return VM_FAULT_MINOR;
+			return VM_FAULT_MINOR|VM_FAULT_WRITE;
 		}
 	}
 	pte_unmap(page_table);
@@ -1283,6 +1292,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
+	ret = VM_FAULT_MINOR;
 	spin_lock(&mm->page_table_lock);
 	page_table = pte_offset_map(pmd, address);
 	if (likely(pte_same(*page_table, pte))) {
@@ -1299,12 +1309,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 
 		/* Free the old page.. */
 		new_page = old_page;
+		ret |= VM_FAULT_WRITE;
 	}
 	pte_unmap(page_table);
 	page_cache_release(new_page);
 	page_cache_release(old_page);
 	spin_unlock(&mm->page_table_lock);
-	return VM_FAULT_MINOR;
+	return ret;
 
 no_new_page:
 	page_cache_release(old_page);
@@ -1996,7 +2007,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	if (write_access) {
 		if (!pte_write(entry))
 			return do_wp_page(mm, vma, address, pte, pmd, entry);
-
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
@@ -2011,7 +2021,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
+int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 		unsigned long address, int write_access)
 {
 	pgd_t *pgd;
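
Two idioms in this patch deserve a note. The first is `return i ? : -EFAULT;` in the opening hunk: a GCC/Clang conditional expression with an omitted middle operand, where `a ? : b` means `a ? a : b` with `a` evaluated only once. In get_user_pages this reads as "return the number of pages already pinned if there are any, otherwise fail with -EFAULT". A minimal standalone sketch of the convention; the names are illustrative, not the kernel's:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a get_user_pages-style loop: returns the
 * number of "pages" processed before hitting a hole, or -EFAULT when
 * no progress was made at all.  Needs GCC/Clang for the `? :`
 * omitted-operand extension. */
static int pin_pages(int npages, int hole_at)
{
	int i;

	for (i = 0; i < npages; i++) {
		if (i == hole_at)		/* simulated missing page */
			return i ? : -EFAULT;	/* partial count, or error */
	}
	return i;
}

int main(void)
{
	printf("%d\n", pin_pages(4, 2));	/* 2: partial progress */
	printf("%d\n", pin_pages(4, 0));	/* -14: -EFAULT, no progress */
	return 0;
}

Returning the partial count instead of an error preserves the caller's work: pages already pinned are still reported, and the caller can retry or bail out from there.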
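The second, and the core of the fix, is the VM_FAULT_WRITE bit: do_wp_page now ORs it into its return value whenever it has broken COW, and get_user_pages masks it off before switching on the base fault code, using it to downgrade later lookups to plain reads. Here is a small self-contained sketch of that flag-in-return-value pattern, with simplified definitions that only mimic the shape of the kernel code:

#include <stdio.h>

#define VM_FAULT_MINOR	1
#define VM_FAULT_MAJOR	2
#define VM_FAULT_WRITE	0x10	/* informational: COW was broken */

/* Hypothetical fault handler: reports a minor fault and, on a write
 * access, notes that it broke COW by setting the info bit. */
static int fake_handle_fault(int write_access)
{
	int ret = VM_FAULT_MINOR;

	if (write_access)
		ret |= VM_FAULT_WRITE;
	return ret;
}

int main(void)
{
	int write_access = 1;	/* first lookup wants a writable page */
	int ret = fake_handle_fault(write_access);

	/* Once COW is broken, subsequent lookups can act as reads. */
	if (ret & VM_FAULT_WRITE)
		write_access = 0;

	switch (ret & ~VM_FAULT_WRITE) {	/* mask off the info bit */
	case VM_FAULT_MINOR:
		printf("minor fault, write_access now %d\n", write_access);
		break;
	case VM_FAULT_MAJOR:
		printf("major fault\n");
		break;
	}
	return 0;
}

Packing the flag into the same int avoids changing the fault handler's signature; the rename to __handle_mm_fault suggests a wrapper elsewhere strips the bit for callers that only care about the base code, though that part is outside this diff.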