Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	181
1 file changed, 146 insertions(+), 35 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 32be3c8f3a11..82f26cde830c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -739,45 +739,159 @@ EXPORT_SYMBOL(__page_cache_alloc);
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-wait_queue_head_t *page_waitqueue(struct page *page)
+#define PAGE_WAIT_TABLE_BITS 8
+#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
+static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
+
+static wait_queue_head_t *page_waitqueue(struct page *page)
 {
-	return bit_waitqueue(page, 0);
+	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
 }
-EXPORT_SYMBOL(page_waitqueue);
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+void __init pagecache_init(void)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+	int i;
 
-	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
-							TASK_UNINTERRUPTIBLE);
+	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
+		init_waitqueue_head(&page_wait_table[i]);
+
+	page_writeback_init();
 }
-EXPORT_SYMBOL(wait_on_page_bit);
 
-int wait_on_page_bit_killable(struct page *page, int bit_nr)
+struct wait_page_key {
+	struct page *page;
+	int bit_nr;
+	int page_match;
+};
+
+struct wait_page_queue {
+	struct page *page;
+	int bit_nr;
+	wait_queue_t wait;
+};
+
+static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+	struct wait_page_key *key = arg;
+	struct wait_page_queue *wait_page
+		= container_of(wait, struct wait_page_queue, wait);
+
+	if (wait_page->page != key->page)
+		return 0;
+	key->page_match = 1;
 
-	if (!test_bit(bit_nr, &page->flags))
+	if (wait_page->bit_nr != key->bit_nr)
+		return 0;
+	if (test_bit(key->bit_nr, &key->page->flags))
 		return 0;
 
-	return __wait_on_bit(page_waitqueue(page), &wait,
-			     bit_wait_io, TASK_KILLABLE);
+	return autoremove_wake_function(wait, mode, sync, key);
 }
 
-int wait_on_page_bit_killable_timeout(struct page *page,
-				       int bit_nr, unsigned long timeout)
+void wake_up_page_bit(struct page *page, int bit_nr)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+	wait_queue_head_t *q = page_waitqueue(page);
+	struct wait_page_key key;
+	unsigned long flags;
 
-	wait.key.timeout = jiffies + timeout;
-	if (!test_bit(bit_nr, &page->flags))
-		return 0;
-	return __wait_on_bit(page_waitqueue(page), &wait,
-			     bit_wait_io_timeout, TASK_KILLABLE);
+	key.page = page;
+	key.bit_nr = bit_nr;
+	key.page_match = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_locked_key(q, TASK_NORMAL, &key);
+	/*
+	 * It is possible for other pages to have collided on the waitqueue
+	 * hash, so in that case check for a page match. That prevents a long-
+	 * term waiter
+	 *
+	 * It is still possible to miss a case here, when we woke page waiters
+	 * and removed them from the waitqueue, but there are still other
+	 * page waiters.
+	 */
+	if (!waitqueue_active(q) || !key.page_match) {
+		ClearPageWaiters(page);
+		/*
+		 * It's possible to miss clearing Waiters here, when we woke
+		 * our page waiters, but the hashed waitqueue has waiters for
+		 * other pages on it.
+		 *
+		 * That's okay, it's a rare case. The next waker will clear it.
+		 */
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(wake_up_page_bit);
+
+static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+		struct page *page, int bit_nr, int state, bool lock)
+{
+	struct wait_page_queue wait_page;
+	wait_queue_t *wait = &wait_page.wait;
+	int ret = 0;
+
+	init_wait(wait);
+	wait->func = wake_page_function;
+	wait_page.page = page;
+	wait_page.bit_nr = bit_nr;
+
+	for (;;) {
+		spin_lock_irq(&q->lock);
+
+		if (likely(list_empty(&wait->task_list))) {
+			if (lock)
+				__add_wait_queue_tail_exclusive(q, wait);
+			else
+				__add_wait_queue(q, wait);
+			SetPageWaiters(page);
+		}
+
+		set_current_state(state);
+
+		spin_unlock_irq(&q->lock);
+
+		if (likely(test_bit(bit_nr, &page->flags))) {
+			io_schedule();
+			if (unlikely(signal_pending_state(state, current))) {
+				ret = -EINTR;
+				break;
+			}
+		}
+
+		if (lock) {
+			if (!test_and_set_bit_lock(bit_nr, &page->flags))
+				break;
+		} else {
+			if (!test_bit(bit_nr, &page->flags))
+				break;
+		}
+	}
+
+	finish_wait(q, wait);
+
+	/*
+	 * A signal could leave PageWaiters set. Clearing it here if
+	 * !waitqueue_active would be possible (by open-coding finish_wait),
+	 * but still fail to catch it in the case of wait hash collision. We
+	 * already can fail to clear wait hash collision cases, so don't
+	 * bother with signals either.
+	 */
+
+	return ret;
+}
+
+void wait_on_page_bit(struct page *page, int bit_nr)
+{
+	wait_queue_head_t *q = page_waitqueue(page);
+	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
+}
+EXPORT_SYMBOL(wait_on_page_bit);
+
+int wait_on_page_bit_killable(struct page *page, int bit_nr)
+{
+	wait_queue_head_t *q = page_waitqueue(page);
+	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
 }
-EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);
 
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
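Aside (not part of the patch): the hunk above replaces the shared bit_waitqueue() with a dedicated, cacheline-aligned hash table of wait queues, and wake_page_function() filters out wakeups aimed at pages that merely collided in the hash. Below is a minimal userspace sketch of that hashed wait-table idea, assuming pthreads; wait_table, slot_for(), wait_on_flag(), and clear_flag_and_wake() are invented names, and a condition-variable broadcast plus predicate re-check stands in for the kernel's keyed __wake_up_locked_key().

/* build: cc -pthread wait_table_sketch.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define TABLE_BITS 8
#define TABLE_SIZE (1 << TABLE_BITS)

struct wait_slot {
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static struct wait_slot wait_table[TABLE_SIZE];

static void wait_table_init(void)
{
	for (int i = 0; i < TABLE_SIZE; i++) {
		pthread_mutex_init(&wait_table[i].lock, NULL);
		pthread_cond_init(&wait_table[i].cond, NULL);
	}
}

/* Crude multiplicative pointer hash: distinct objects may share a
 * slot, just as distinct pages may collide in page_wait_table[]. */
static struct wait_slot *slot_for(const void *obj)
{
	uint64_t h = (uint64_t)(uintptr_t)obj * 0x9E3779B97F4A7C15ull;

	return &wait_table[h >> (64 - TABLE_BITS)];
}

/* Sleep until *flag goes to zero. The while loop re-checks the
 * predicate, this sketch's stand-in for the page/bit_nr key match
 * performed by wake_page_function(). */
static void wait_on_flag(const void *obj, const int *flag)
{
	struct wait_slot *s = slot_for(obj);

	pthread_mutex_lock(&s->lock);
	while (*flag)
		pthread_cond_wait(&s->cond, &s->lock);
	pthread_mutex_unlock(&s->lock);
}

/* Clear the flag and wake the slot. Broadcast wakes colliders too;
 * they harmlessly re-check their own flag and go back to sleep. */
static void clear_flag_and_wake(const void *obj, int *flag)
{
	struct wait_slot *s = slot_for(obj);

	pthread_mutex_lock(&s->lock);
	*flag = 0;
	pthread_cond_broadcast(&s->cond);
	pthread_mutex_unlock(&s->lock);
}

static int busy = 1;

static void *waker(void *arg)
{
	(void)arg;
	sleep(1);
	clear_flag_and_wake(&busy, &busy);
	return NULL;
}

int main(void)
{
	pthread_t t;

	wait_table_init();
	pthread_create(&t, NULL, waker, NULL);
	wait_on_flag(&busy, &busy);	/* blocks until the waker clears it */
	pthread_join(t, NULL);
	puts("woken after flag cleared");
	return 0;
}

The broadcast is deliberately imprecise, like a hash collision: woken threads whose own flag is still set simply go back to sleep, which is exactly the role the page and bit_nr checks play in wake_page_function().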
@@ -793,6 +907,7 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 
 	spin_lock_irqsave(&q->lock, flags);
 	__add_wait_queue(q, waiter);
+	SetPageWaiters(page);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_page_wait_queue);
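Aside (not part of the patch): this one-line hunk makes externally added waiters set PageWaiters too, since that page-flag bit is what lets the unlock side skip the hashed waitqueue lookup entirely when nobody is waiting. A hedged C11-atomics sketch of that fast-path test follows; FLAG_WAITERS and unlock_and_maybe_wake() are illustrative names, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_LOCKED  (1u << 0)
#define FLAG_WAITERS (1u << 1)	/* what SetPageWaiters() sets, in spirit */

/* Release the lock bit and, in the same atomic op, learn whether any
 * waiter announced itself. Only then pay for the waitqueue wake-up. */
static void unlock_and_maybe_wake(atomic_uint *flags)
{
	unsigned int old = atomic_fetch_and_explicit(flags, ~FLAG_LOCKED,
						     memory_order_release);
	if (old & FLAG_WAITERS)
		puts("slow path: hash the object, wake its waitqueue");
	else
		puts("fast path: nobody waiting, no waitqueue lookup");
}

int main(void)
{
	atomic_uint flags;

	atomic_init(&flags, FLAG_LOCKED);
	unlock_and_maybe_wake(&flags);	/* fast path */

	atomic_store(&flags, FLAG_LOCKED | FLAG_WAITERS);
	unlock_and_maybe_wake(&flags);	/* slow path */
	return 0;
}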
@@ -874,23 +989,19 @@ EXPORT_SYMBOL_GPL(page_endio);
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
  */
-void __lock_page(struct page *page)
+void __lock_page(struct page *__page)
 {
-	struct page *page_head = compound_head(page);
-	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
-							TASK_UNINTERRUPTIBLE);
+	struct page *page = compound_head(__page);
+	wait_queue_head_t *q = page_waitqueue(page);
+	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
 }
 EXPORT_SYMBOL(__lock_page);
 
-int __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *__page)
 {
-	struct page *page_head = compound_head(page);
-	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
-					bit_wait_io, TASK_KILLABLE);
+	struct page *page = compound_head(__page);
+	wait_queue_head_t *q = page_waitqueue(page);
+	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
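Aside (not part of the patch): both lock functions above call wait_on_page_bit_common() with lock == true, whose loop tries test_and_set_bit_lock() and sleeps between attempts, queued exclusively so one unlock wakes one contender. A self-contained userspace sketch of that loop shape, assuming pthreads and C11 atomics; all names here are illustrative.

/* build: cc -pthread lock_bit_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cond = PTHREAD_COND_INITIALIZER;
static atomic_uint flags;	/* bit 0 plays the role of PG_locked */

/* Acquire-ordered test-and-set: true iff we flipped bit 0 from 0 to 1,
 * mirroring test_and_set_bit_lock(PG_locked, ...). */
static bool try_lock_bit0(void)
{
	return !(atomic_fetch_or_explicit(&flags, 1u,
					  memory_order_acquire) & 1u);
}

static void lock_bit0(void)
{
	pthread_mutex_lock(&q_lock);
	/* like the for (;;) loop: sleep, then retry the atomic acquisition */
	while (!try_lock_bit0())
		pthread_cond_wait(&q_cond, &q_lock);
	pthread_mutex_unlock(&q_lock);
}

static void unlock_bit0(void)
{
	pthread_mutex_lock(&q_lock);
	atomic_fetch_and_explicit(&flags, ~1u, memory_order_release);
	pthread_cond_signal(&q_cond);	/* exclusive wake: one contender retries */
	pthread_mutex_unlock(&q_lock);
}

static void *worker(void *arg)
{
	lock_bit0();
	printf("thread %ld acquired the bit\n", (long)arg);
	unlock_bit0();
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Signaling under the same mutex the waiter re-checks under is what prevents a lost wakeup here; the kernel gets the equivalent guarantee from the waitqueue spinlock together with the set_current_state() ordering in wait_on_page_bit_common().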