Diffstat (limited to 'mm')

-rw-r--r--  mm/memory.c   |  4
-rw-r--r--  mm/swapfile.c | 15

2 files changed, 13 insertions, 6 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 3922ffcf3dff..8f471edcb985 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1861,7 +1861,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			}
 			page_cache_release(old_page);
 		}
-		reuse = can_share_swap_page(old_page);
+		reuse = reuse_swap_page(old_page);
 		unlock_page(old_page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
@@ -2392,7 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter(mm, anon_rss);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if (write_access && can_share_swap_page(page)) {
+	if (write_access && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		write_access = 0;
 	}
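
Both call sites above follow the same convention: the page is locked across the call, and a nonzero return means the caller holds the only reference and may write to the page in place instead of taking the copy-on-write path. A minimal sketch of that convention, using a hypothetical helper that is not part of the patch:

/*
 * Hypothetical helper, not from this patch: illustrates the calling
 * convention that do_wp_page() and do_swap_page() follow.
 */
static int may_write_in_place(struct page *page)
{
	int reuse;

	lock_page(page);		/* reuse_swap_page() asserts PageLocked */
	reuse = reuse_swap_page(page);	/* nonzero: sole reference, skip the COW copy */
	unlock_page(page);

	return reuse;
}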
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 214e90b94946..bfd4ee59cb88 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -326,17 +326,24 @@ static inline int page_swapcount(struct page *page)
 }
 
 /*
- * We can use this swap cache entry directly
- * if there are no other references to it.
+ * We can write to an anon page without COW if there are no other references
+ * to it.  And as a side-effect, free up its swap: because the old content
+ * on disk will never be read, and seeking back there to write new content
+ * later would only waste time away from clustering.
  */
-int can_share_swap_page(struct page *page)
+int reuse_swap_page(struct page *page)
 {
 	int count;
 
 	VM_BUG_ON(!PageLocked(page));
 	count = page_mapcount(page);
-	if (count <= 1 && PageSwapCache(page))
+	if (count <= 1 && PageSwapCache(page)) {
 		count += page_swapcount(page);
+		if (count == 1 && !PageWriteback(page)) {
+			delete_from_swap_cache(page);
+			SetPageDirty(page);
+		}
+	}
 	return count == 1;
 }
 
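
For readability, here is reuse_swap_page() assembled from the hunk above, as it reads once the patch is applied; the added inner branch is the new side effect that frees the swap slot:

int reuse_swap_page(struct page *page)
{
	int count;

	VM_BUG_ON(!PageLocked(page));
	count = page_mapcount(page);
	if (count <= 1 && PageSwapCache(page)) {
		count += page_swapcount(page);
		if (count == 1 && !PageWriteback(page)) {
			/*
			 * Sole reference: the old content on disk will never
			 * be read again, so drop the swap slot now.
			 */
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
	}
	return count == 1;	/* nonzero: caller may write in place */
}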