author		Peter Zijlstra <peterz@infradead.org>	2007-10-08 12:54:37 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-08 15:58:14 -0400
commit		a200ee182a016752464a12cb2e8762e48254bb09
tree		7b273f002625a4c368f7b20b144990f7f4f81df9 /mm/memory.c
parent		3eb215de26e6e94bf5fed9cb77230c383b30e53b
mm: set_page_dirty_balance() vs ->page_mkwrite()
All the current page_mkwrite() implementations also set the page dirty, which
results in the set_page_dirty_balance() call _not_ calling balance, because the
page is already found dirty. This allows us to dirty a _lot_ of pages without
ever hitting balance_dirty_pages(). Not good (tm).

Force a balance call if ->page_mkwrite() was successful.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
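The callee's side of this change lives in mm/page-writeback.c and so falls
outside the diffstat below, which is limited to mm/memory.c. Reconstructed
from the new call sites, the updated helper would look roughly like this
(a sketch, not the verbatim commit):

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	/* A successful ->page_mkwrite() now forces the throttling
	 * check even when set_page_dirty() finds the page already
	 * dirty and returns 0. */
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}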
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	9
1 files changed, 7 insertions, 2 deletions
diff --git a/mm/memory.c b/mm/memory.c
index c0e7741a98de..f82b359b2745 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *old_page, *new_page;
 	pte_t entry;
 	int reuse = 0, ret = 0;
+	int page_mkwrite = 0;
 	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_cache_release(old_page);
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
+
+			page_mkwrite = 1;
 		}
 		dirty_page = old_page;
 		get_page(dirty_page);
@@ -1774,7 +1777,7 @@ unlock:
 		 * do_no_page is protected similarly.
 		 */
 		wait_on_page_locked(dirty_page);
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 	return ret;
@@ -2322,6 +2325,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
+	int page_mkwrite = 0;
 
 	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
 	vmf.pgoff = pgoff;
@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				anon = 1; /* no anon but release vmf.page */
 				goto out;
 			}
+			page_mkwrite = 1;
 		}
 	}
 
@@ -2453,7 +2458,7 @@ out_unlocked:
 	if (anon)
 		page_cache_release(vmf.page);
 	else if (dirty_page) {
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 
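As the commit message notes, every ->page_mkwrite() implementation at the
time dirtied the page itself. A hypothetical handler (illustration only, not
part of this commit) shows why the old set_page_dirty_balance() then had
nothing left to do:

/* Hypothetical handler for the 2.6.23-era prototype
 * int (*page_mkwrite)(struct vm_area_struct *, struct page *).
 * Because it dirties the page up front, the set_page_dirty() call
 * inside the old set_page_dirty_balance() returned 0 ("already
 * dirty"), so balance_dirty_pages() was never reached. */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/* ... reserve blocks/space for the coming write ... */
	set_page_dirty(page);
	return 0;
}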