path: root/mm
author	Michel Lespinasse <walken@google.com>	2011-01-13 18:46:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:35 -0500
commit	72ddc8f72270758951ccefb7d190f364d20215ab (patch)
tree	11772272825f72aa3f32c0f9be5cf35155cf1441 /mm
parent	b009c024ff0059e293c1937516f2defe56263650 (diff)
do_wp_page: clarify dirty_page handling
Reorganize the code so that dirty pages are handled closer to the place
that makes them dirty (handling write fault into shared, writable VMAs).
No behavior changes.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Kosaki Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Theodore Tso <tytso@google.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
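The reorganization follows a common refactoring pattern: work that only one branch needs (here, the dirty-page balancing done after a write fault into a shared, writable VMA) is pulled out of a shared exit label and finished in the branch that generates it, which then returns directly. The following is a minimal standalone C sketch of that pattern only, not the kernel code; handle_write_fault, balance_dirty_page, and the return value are hypothetical stand-ins.

/*
 * Sketch of the pattern: the shared-writable path handles its own
 * dirty-page work and returns; the common exit path no longer has to
 * check for it.  All names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static void balance_dirty_page(void)
{
	printf("balance the dirty page here, close to where it was dirtied\n");
}

static int handle_write_fault(bool shared_writable)
{
	int ret = 0;

	if (shared_writable) {
		/* "reuse" path: mark the mapping writable ... */
		ret |= 1;		/* stands in for VM_FAULT_WRITE */
		balance_dirty_page();	/* handled locally now */
		return ret;		/* instead of "goto unlock" */
	}

	/* copy-on-write path: nothing dirty to balance */
	ret |= 1;
	/* common unlock/return path remains, without dirty-page checks */
	return ret;
}

int main(void)
{
	handle_write_fault(true);
	handle_write_fault(false);
	return 0;
}

The diff below is the real change; the sketch only mirrors its control-flow shape.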
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	72
1 file changed, 38 insertions, 34 deletions
diff --git a/mm/memory.c b/mm/memory.c
index d0cc1c134a64..9144fae9a68b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2229,8 +2229,45 @@ reuse:
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
 			update_mmu_cache(vma, address, page_table);
+		pte_unmap_unlock(page_table, ptl);
 		ret |= VM_FAULT_WRITE;
-		goto unlock;
+
+		if (!dirty_page)
+			return ret;
+
+		/*
+		 * Yes, Virginia, this is actually required to prevent a race
+		 * with clear_page_dirty_for_io() from clearing the page dirty
+		 * bit after it clear all dirty ptes, but before a racing
+		 * do_wp_page installs a dirty pte.
+		 *
+		 * do_no_page is protected similarly.
+		 */
+		if (!page_mkwrite) {
+			wait_on_page_locked(dirty_page);
+			set_page_dirty_balance(dirty_page, page_mkwrite);
+		}
+		put_page(dirty_page);
+		if (page_mkwrite) {
+			struct address_space *mapping = dirty_page->mapping;
+
+			set_page_dirty(dirty_page);
+			unlock_page(dirty_page);
+			page_cache_release(dirty_page);
+			if (mapping) {
+				/*
+				 * Some device drivers do not set page.mapping
+				 * but still dirty their pages
+				 */
+				balance_dirty_pages_ratelimited(mapping);
+			}
+		}
+
+		/* file_update_time outside page_lock */
+		if (vma->vm_file)
+			file_update_time(vma->vm_file);
+
+		return ret;
 	}
 
 	/*
@@ -2336,39 +2373,6 @@ gotten:
 		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
-	if (dirty_page) {
-		/*
-		 * Yes, Virginia, this is actually required to prevent a race
-		 * with clear_page_dirty_for_io() from clearing the page dirty
-		 * bit after it clear all dirty ptes, but before a racing
-		 * do_wp_page installs a dirty pte.
-		 *
-		 * do_no_page is protected similarly.
-		 */
-		if (!page_mkwrite) {
-			wait_on_page_locked(dirty_page);
-			set_page_dirty_balance(dirty_page, page_mkwrite);
-		}
-		put_page(dirty_page);
-		if (page_mkwrite) {
-			struct address_space *mapping = dirty_page->mapping;
-
-			set_page_dirty(dirty_page);
-			unlock_page(dirty_page);
-			page_cache_release(dirty_page);
-			if (mapping) {
-				/*
-				 * Some device drivers do not set page.mapping
-				 * but still dirty their pages
-				 */
-				balance_dirty_pages_ratelimited(mapping);
-			}
-		}
-
-		/* file_update_time outside page_lock */
-		if (vma->vm_file)
-			file_update_time(vma->vm_file);
-	}
 	return ret;
 oom_free_new:
 	page_cache_release(new_page);