path: root/mm/filemap.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2017-07-03 16:08:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-03 16:08:04 -0400
commit		9bd42183b951051f73de121f7ee17091e7d26fbb (patch)
tree		c85c680126a0548a3c5f083e35f5b1cadce636f6 /mm/filemap.c
parent		7447d56217e215e50317f308aee1ed293ac4f749 (diff)
parent		72298e5c92c50edd8cb7cfda4519483ce65fa166 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add the SYSTEM_SCHEDULING bootup state to move various scheduler debug checks earlier into the bootup. This turns silent and sporadically deadly bugs into nice, deterministic splats. Fix some of the splats that triggered. (Thomas Gleixner)

   - A round of restructuring and refactoring of the load-balancing and topology code (Peter Zijlstra)

   - Another round of consolidating ~20 years of incremental scheduler code history: this time in terms of wait-queue nomenclature. (I didn't get much feedback on these renaming patches, and we can still easily change any names I might have misplaced, so if anyone hates a new name, please holler and I'll fix it.) (Ingo Molnar)

   - sched/numa improvements, fixes and updates (Rik van Riel)

   - Another round of x86/tsc scheduler clock code improvements, in hope of making it more robust (Peter Zijlstra)

   - Improve NOHZ behavior (Frederic Weisbecker)

   - Deadline scheduler improvements and fixes (Luca Abeni, Daniel Bristot de Oliveira)

   - Simplify and optimize the topology setup code (Lauro Ramos Venancio)

   - Debloat and decouple scheduler code some more (Nicolas Pitre)

   - Simplify code by making better use of llist primitives (Byungchul Park)

   - ... plus other fixes and improvements"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (103 commits)
  sched/cputime: Refactor the cputime_adjust() code
  sched/debug: Expose the number of RT/DL tasks that can migrate
  sched/numa: Hide numa_wake_affine() from UP build
  sched/fair: Remove effective_load()
  sched/numa: Implement NUMA node level wake_affine()
  sched/fair: Simplify wake_affine() for the single socket case
  sched/numa: Override part of migrate_degrades_locality() when idle balancing
  sched/rt: Move RT related code from sched/core.c to sched/rt.c
  sched/deadline: Move DL related code from sched/core.c to sched/deadline.c
  sched/cpuset: Only offer CONFIG_CPUSETS if SMP is enabled
  sched/fair: Spare idle load balancing on nohz_full CPUs
  nohz: Move idle balancer registration to the idle path
  sched/loadavg: Generalize "_idle" naming to "_nohz"
  sched/core: Drop the unused try_get_task_struct() helper function
  sched/fair: WARN() and refuse to set buddy when !se->on_rq
  sched/debug: Fix SCHED_WARN_ON() to return a value on !CONFIG_SCHED_DEBUG as well
  sched/wait: Disambiguate wq_entry->task_list and wq_head->task_list naming
  sched/wait: Move bit_wait_table[] and related functionality from sched/core.c to sched/wait_bit.c
  sched/wait: Split out the wait_bit*() APIs from <linux/wait.h> into <linux/wait_bit.h>
  sched/wait: Re-adjust macro line continuation backslashes in <linux/wait.h>
  ...
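For context only (nothing below is part of this merge): the wait-queue renaming referred to above spells the old wait_queue_t entry type as wait_queue_entry_t and renames the entry's ->task_list link to ->entry. A minimal sleep/wake user written against the new names could look like this sketch; example_wait_for_flag() and the flag pointer are invented for illustration, not taken from the kernel:

#include <linux/sched.h>
#include <linux/wait.h>

/* Illustrative only: sleep on @q until *@flag becomes true. */
static void example_wait_for_flag(wait_queue_head_t *q, bool *flag)
{
	wait_queue_entry_t wait;	/* was "wait_queue_t wait;" before this series */

	init_wait(&wait);
	for (;;) {
		/* queue ourselves and mark the task as sleeping */
		prepare_to_wait(q, &wait, TASK_UNINTERRUPTIBLE);
		if (*flag)
			break;
		schedule();
	}
	/* remove the entry from the queue and restore TASK_RUNNING */
	finish_wait(q, &wait);
}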
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 742034e56100..aea58e983a73 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -800,10 +800,10 @@ struct wait_page_key {
 struct wait_page_queue {
 	struct page *page;
 	int bit_nr;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 };
 
-static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
 	struct wait_page_key *key = arg;
 	struct wait_page_queue *wait_page
@@ -866,7 +866,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	struct page *page, int bit_nr, int state, bool lock)
 {
 	struct wait_page_queue wait_page;
-	wait_queue_t *wait = &wait_page.wait;
+	wait_queue_entry_t *wait = &wait_page.wait;
 	int ret = 0;
 
 	init_wait(wait);
@@ -877,9 +877,9 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	for (;;) {
 		spin_lock_irq(&q->lock);
 
-		if (likely(list_empty(&wait->task_list))) {
+		if (likely(list_empty(&wait->entry))) {
 			if (lock)
-				__add_wait_queue_tail_exclusive(q, wait);
+				__add_wait_queue_entry_tail_exclusive(q, wait);
 			else
 				__add_wait_queue(q, wait);
 			SetPageWaiters(page);
@@ -939,7 +939,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
  *
  * Add an arbitrary @waiter to the wait queue for the nominated @page.
  */
-void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	unsigned long flags;
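For illustration only (not part of this patch): with the signature change above, a caller of add_page_wait_queue() now passes a wait_queue_entry_t *, typically embedded in its own state together with a wake callback. A rough sketch, assuming kernel context; struct example_monitor, example_page_waiter() and example_monitor_attach() are invented names:

#include <linux/pagemap.h>
#include <linux/wait.h>

/* Hypothetical per-page monitor embedding the renamed wait-queue entry. */
struct example_monitor {
	wait_queue_entry_t wait;	/* was wait_queue_t before this series */
};

/* Wake callback run when the watched page's wait queue is woken;
 * the page wake path hands a struct wait_page_key via @key
 * (see wake_page_function() in the first hunk above). */
static int example_page_waiter(wait_queue_entry_t *wait, unsigned mode,
			       int sync, void *key)
{
	return 0;
}

static void example_monitor_attach(struct example_monitor *mon,
				   struct page *page)
{
	init_waitqueue_func_entry(&mon->wait, example_page_waiter);
	add_page_wait_queue(page, &mon->wait);
}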