Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  67
1 file changed, 6 insertions, 61 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 83a45d35468b..380776c2a9ac 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -155,45 +155,15 @@ void remove_from_page_cache(struct page *page)
 }
 EXPORT_SYMBOL(remove_from_page_cache);
 
-static int sync_page(void *word)
+static int sleep_on_page(void *word)
 {
-	struct address_space *mapping;
-	struct page *page;
-
-	page = container_of((unsigned long *)word, struct page, flags);
-
-	/*
-	 * page_mapping() is being called without PG_locked held.
-	 * Some knowledge of the state and use of the page is used to
-	 * reduce the requirements down to a memory barrier.
-	 * The danger here is of a stale page_mapping() return value
-	 * indicating a struct address_space different from the one it's
-	 * associated with when it is associated with one.
-	 * After smp_mb(), it's either the correct page_mapping() for
-	 * the page, or an old page_mapping() and the page's own
-	 * page_mapping() has gone NULL.
-	 * The ->sync_page() address_space operation must tolerate
-	 * page_mapping() going NULL. By an amazing coincidence,
-	 * this comes about because none of the users of the page
-	 * in the ->sync_page() methods make essential use of the
-	 * page_mapping(), merely passing the page down to the backing
-	 * device's unplug functions when it's non-NULL, which in turn
-	 * ignore it for all cases but swap, where only page_private(page) is
-	 * of interest. When page_mapping() does go NULL, the entire
-	 * call stack gracefully ignores the page and returns.
-	 * -- wli
-	 */
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
-		mapping->a_ops->sync_page(page);
 	io_schedule();
 	return 0;
 }
 
-static int sync_page_killable(void *word)
+static int sleep_on_page_killable(void *word)
 {
-	sync_page(word);
+	sleep_on_page(word);
 	return fatal_signal_pending(current) ? -EINTR : 0;
 }
 
@@ -479,12 +449,6 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
-static int __sleep_on_page_lock(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -512,7 +476,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
 	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
+		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -576,17 +540,12 @@ EXPORT_SYMBOL(end_page_writeback);
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
- *
- * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
- * random driver's requestfn sets TASK_RUNNING, we could busywait. However
- * chances are that on the second loop, the block layer's plug list is empty,
- * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
 void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
-	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
+	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
@@ -596,24 +555,10 @@ int __lock_page_killable(struct page *page)
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
 	return __wait_on_bit_lock(page_waitqueue(page), &wait,
-					sync_page_killable, TASK_KILLABLE);
+					sleep_on_page_killable, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
-/**
- * __lock_page_nosync - get a lock on the page, without calling sync_page()
- * @page: the page to lock
- *
- * Variant of lock_page that does not require the caller to hold a reference
- * on the page's mapping.
- */
-void __lock_page_nosync(struct page *page)
-{
-	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
-	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
-							TASK_UNINTERRUPTIBLE);
-}
-
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {