diff options
author | Hugh Dickins <hugh@veritas.com> | 2005-11-22 00:32:17 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-22 12:13:42 -0500 |
commit | 920fc356f58d0e455bdfa53451f1c58eb211a846 (patch) | |
tree | cb5fe90bc74ad9dd95191bb3c105aadd833197f2 /mm | |
parent | 101d2be7646b7dd1c367d50208a59b29fce61398 (diff) |
[PATCH] unpaged: COW on VM_UNPAGED
Remove the BUG_ON(vma->vm_flags & VM_UNPAGED) from do_wp_page, and let it do
Copy-On-Write without touching the VM_UNPAGED's page counts - but this is
incomplete, because the anonymous page it inserts will itself need to be
handled, here and in other functions - next patch.
We still don't copy the page if the pfn is invalid, because the
copy_user_highpage interface does not allow it. But that has not been a
problem in the past: support can be added later if the need arises.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 37 |
1 file changed, 25 insertions, 12 deletions
diff --git a/mm/memory.c b/mm/memory.c index ece04963158e..107b619cfb16 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1277,22 +1277,28 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1277 | unsigned long address, pte_t *page_table, pmd_t *pmd, | 1277 | unsigned long address, pte_t *page_table, pmd_t *pmd, |
1278 | spinlock_t *ptl, pte_t orig_pte) | 1278 | spinlock_t *ptl, pte_t orig_pte) |
1279 | { | 1279 | { |
1280 | struct page *old_page, *new_page; | 1280 | struct page *old_page, *src_page, *new_page; |
1281 | unsigned long pfn = pte_pfn(orig_pte); | 1281 | unsigned long pfn = pte_pfn(orig_pte); |
1282 | pte_t entry; | 1282 | pte_t entry; |
1283 | int ret = VM_FAULT_MINOR; | 1283 | int ret = VM_FAULT_MINOR; |
1284 | 1284 | ||
1285 | BUG_ON(vma->vm_flags & VM_UNPAGED); | ||
1286 | |||
1287 | if (unlikely(!pfn_valid(pfn))) { | 1285 | if (unlikely(!pfn_valid(pfn))) { |
1288 | /* | 1286 | /* |
1289 | * Page table corrupted: show pte and kill process. | 1287 | * Page table corrupted: show pte and kill process. |
1288 | * Or it's an attempt to COW an out-of-map VM_UNPAGED | ||
1289 | * entry, which copy_user_highpage does not support. | ||
1290 | */ | 1290 | */ |
1291 | print_bad_pte(vma, orig_pte, address); | 1291 | print_bad_pte(vma, orig_pte, address); |
1292 | ret = VM_FAULT_OOM; | 1292 | ret = VM_FAULT_OOM; |
1293 | goto unlock; | 1293 | goto unlock; |
1294 | } | 1294 | } |
1295 | old_page = pfn_to_page(pfn); | 1295 | old_page = pfn_to_page(pfn); |
1296 | src_page = old_page; | ||
1297 | |||
1298 | if (unlikely(vma->vm_flags & VM_UNPAGED)) { | ||
1299 | old_page = NULL; | ||
1300 | goto gotten; | ||
1301 | } | ||
1296 | 1302 | ||
1297 | if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { | 1303 | if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { |
1298 | int reuse = can_share_swap_page(old_page); | 1304 | int reuse = can_share_swap_page(old_page); |
@@ -1313,11 +1319,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1313 | * Ok, we need to copy. Oh, well.. | 1319 | * Ok, we need to copy. Oh, well.. |
1314 | */ | 1320 | */ |
1315 | page_cache_get(old_page); | 1321 | page_cache_get(old_page); |
1322 | gotten: | ||
1316 | pte_unmap_unlock(page_table, ptl); | 1323 | pte_unmap_unlock(page_table, ptl); |
1317 | 1324 | ||
1318 | if (unlikely(anon_vma_prepare(vma))) | 1325 | if (unlikely(anon_vma_prepare(vma))) |
1319 | goto oom; | 1326 | goto oom; |
1320 | if (old_page == ZERO_PAGE(address)) { | 1327 | if (src_page == ZERO_PAGE(address)) { |
1321 | new_page = alloc_zeroed_user_highpage(vma, address); | 1328 | new_page = alloc_zeroed_user_highpage(vma, address); |
1322 | if (!new_page) | 1329 | if (!new_page) |
1323 | goto oom; | 1330 | goto oom; |
@@ -1325,7 +1332,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1325 | new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); | 1332 | new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); |
1326 | if (!new_page) | 1333 | if (!new_page) |
1327 | goto oom; | 1334 | goto oom; |
1328 | copy_user_highpage(new_page, old_page, address); | 1335 | copy_user_highpage(new_page, src_page, address); |
1329 | } | 1336 | } |
1330 | 1337 | ||
1331 | /* | 1338 | /* |
@@ -1333,11 +1340,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1333 | */ | 1340 | */ |
1334 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); | 1341 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
1335 | if (likely(pte_same(*page_table, orig_pte))) { | 1342 | if (likely(pte_same(*page_table, orig_pte))) { |
1336 | page_remove_rmap(old_page); | 1343 | if (old_page) { |
1337 | if (!PageAnon(old_page)) { | 1344 | page_remove_rmap(old_page); |
1345 | if (!PageAnon(old_page)) { | ||
1346 | dec_mm_counter(mm, file_rss); | ||
1347 | inc_mm_counter(mm, anon_rss); | ||
1348 | } | ||
1349 | } else | ||
1338 | inc_mm_counter(mm, anon_rss); | 1350 | inc_mm_counter(mm, anon_rss); |
1339 | dec_mm_counter(mm, file_rss); | ||
1340 | } | ||
1341 | flush_cache_page(vma, address, pfn); | 1351 | flush_cache_page(vma, address, pfn); |
1342 | entry = mk_pte(new_page, vma->vm_page_prot); | 1352 | entry = mk_pte(new_page, vma->vm_page_prot); |
1343 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | 1353 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
@@ -1351,13 +1361,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1351 | new_page = old_page; | 1361 | new_page = old_page; |
1352 | ret |= VM_FAULT_WRITE; | 1362 | ret |= VM_FAULT_WRITE; |
1353 | } | 1363 | } |
1354 | page_cache_release(new_page); | 1364 | if (new_page) |
1355 | page_cache_release(old_page); | 1365 | page_cache_release(new_page); |
1366 | if (old_page) | ||
1367 | page_cache_release(old_page); | ||
1356 | unlock: | 1368 | unlock: |
1357 | pte_unmap_unlock(page_table, ptl); | 1369 | pte_unmap_unlock(page_table, ptl); |
1358 | return ret; | 1370 | return ret; |
1359 | oom: | 1371 | oom: |
1360 | page_cache_release(old_page); | 1372 | if (old_page) |
1373 | page_cache_release(old_page); | ||
1361 | return VM_FAULT_OOM; | 1374 | return VM_FAULT_OOM; |
1362 | } | 1375 | } |
1363 | 1376 | ||