aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTim Chen <tim.c.chen@linux.intel.com>2017-08-25 12:13:55 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-09-14 12:56:18 -0400
commit11a19c7b099f96d00a8dec52bfbb8475e89b6745 (patch)
treef1b357ea18df807c38355591ff929aaf4bb3e3f0
parent2554db916586b228ce93e6f74a12fd7fe430a004 (diff)
sched/wait: Introduce wakeup bookmark in wake_up_page_bit
Now that we have added breaks in the wait queue scan and allow a bookmark on the scan position, we put this logic in the wake_up_page_bit function. We can have very long page wait lists in large systems where multiple pages share the same wait list. We break up the wake-up walk here to give other CPUs a chance to access the list, and to avoid disabling interrupts while traversing the list for too long. This reduces the interrupt and rescheduling latency, and excessive page wait queue lock hold time. [ v2: Remove bookmark_wake_function ] Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/wait.h2
-rw-r--r--kernel/sched/wait.c7
-rw-r--r--mm/filemap.c22
3 files changed, 30 insertions, 1 deletion
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 78401ef02d29..87c4641023fb 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -185,6 +185,8 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
185 185
186void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); 186void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
187void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); 187void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
188void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
189 unsigned int mode, void *key, wait_queue_entry_t *bookmark);
188void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); 190void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
189void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); 191void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
190void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); 192void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 70701ef50465..98feab7933c7 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -165,6 +165,13 @@ void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, vo
165} 165}
166EXPORT_SYMBOL_GPL(__wake_up_locked_key); 166EXPORT_SYMBOL_GPL(__wake_up_locked_key);
167 167
168void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
169 unsigned int mode, void *key, wait_queue_entry_t *bookmark)
170{
171 __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
172}
173EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
174
168/** 175/**
169 * __wake_up_sync_key - wake up threads blocked on a waitqueue. 176 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
170 * @wq_head: the waitqueue 177 * @wq_head: the waitqueue
diff --git a/mm/filemap.c b/mm/filemap.c
index 9d21afd692b9..8c88e186a773 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -909,13 +909,33 @@ static void wake_up_page_bit(struct page *page, int bit_nr)
909 wait_queue_head_t *q = page_waitqueue(page); 909 wait_queue_head_t *q = page_waitqueue(page);
910 struct wait_page_key key; 910 struct wait_page_key key;
911 unsigned long flags; 911 unsigned long flags;
912 wait_queue_entry_t bookmark;
912 913
913 key.page = page; 914 key.page = page;
914 key.bit_nr = bit_nr; 915 key.bit_nr = bit_nr;
915 key.page_match = 0; 916 key.page_match = 0;
916 917
918 bookmark.flags = 0;
919 bookmark.private = NULL;
920 bookmark.func = NULL;
921 INIT_LIST_HEAD(&bookmark.entry);
922
917 spin_lock_irqsave(&q->lock, flags); 923 spin_lock_irqsave(&q->lock, flags);
918 __wake_up_locked_key(q, TASK_NORMAL, &key); 924 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
925
926 while (bookmark.flags & WQ_FLAG_BOOKMARK) {
927 /*
928 * Take a breather from holding the lock,
929 * allow pages that finish wake up asynchronously
930 * to acquire the lock and remove themselves
931 * from wait queue
932 */
933 spin_unlock_irqrestore(&q->lock, flags);
934 cpu_relax();
935 spin_lock_irqsave(&q->lock, flags);
936 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
937 }
938
919 /* 939 /*
920 * It is possible for other pages to have collided on the waitqueue 940 * It is possible for other pages to have collided on the waitqueue
921 * hash, so in that case check for a page match. That prevents a long- 941 * hash, so in that case check for a page match. That prevents a long-