diff options
author     Nick Piggin <npiggin@suse.de>          2006-09-26 02:31:24 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 11:48:48 -0400
commit     db37648cd6ce9b828abd6d49aa3d269926ee7b7d (patch)
tree       a0155c7897f4706386d10c8718f98687bc357c82 /mm/filemap.c
parent     28e4d965e6131ace1e813e93aebca89ac6b82dc1 (diff)
[PATCH] mm: non syncing lock_page()
lock_page needs the caller to have a reference on the page->mapping inode
due to sync_page, ergo set_page_dirty_lock is obviously buggy according to
its comments.
Solve it by introducing a new lock_page_nosync which does not do a sync_page.
akpm: unpleasant solution to an unpleasant problem. If it goes wrong it could
cause great slowdowns while the lock_page() caller waits for kblockd to
perform the unplug. And if a filesystem has special sync_page() requirements
(none presently do), permanent hangs are possible.
otoh, set_page_dirty_lock() is usually (always?) called against userspace
pages. They are always up-to-date, so there shouldn't be any pending read I/O
against these pages.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index b9a60c43b61a..d5af1cab4268 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -488,6 +488,12 @@ struct page *page_cache_alloc_cold(struct address_space *x)
 EXPORT_SYMBOL(page_cache_alloc_cold);
 #endif
 
+static int __sleep_on_page_lock(void *word)
+{
+	io_schedule();
+	return 0;
+}
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -577,6 +583,17 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+/*
+ * Variant of lock_page that does not require the caller to hold a reference
+ * on the page's mapping.
+ */
+void fastcall __lock_page_nosync(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
+							TASK_UNINTERRUPTIBLE);
+}
+
 /**
  * find_get_page - find and get a page reference
  * @mapping: the address_space to search