diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 19:23:30 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 19:23:30 -0400 |
commit | 98959948a7ba33cf8c708626e0d2a1456397e1c6 (patch) | |
tree | 8ba9b6c2679a06e89f23bdd7018e9bb0249e3bda /mm/filemap.c | |
parent | ef35ad26f8ff44d2c93e29952cdb336bda729d9d (diff) | |
parent | cd3bd4e628a6d57d66afe77835fe8d93ae3e41f8 (diff) |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Move the nohz kick code out of the scheduler tick to a dedicated IPI,
from Frederic Weisbecker.
This necessitated quite some background infrastructure rework,
including:
* Clean up some irq-work internals
* Implement remote irq-work
* Implement nohz kick on top of remote irq-work
* Move full dynticks timer enqueue notification to new kick
* Move multi-task notification to new kick
* Remove unnecessary barriers on multi-task notification
- Remove proliferation of wait_on_bit() action functions and allow
wait_on_bit_action() functions to support a timeout. (Neil Brown)
- Another round of sched/numa improvements, cleanups and fixes. (Rik
van Riel)
- Implement fast idling of CPUs when the system is partially loaded,
for better scalability. (Tim Chen)
- Restructure and fix the CPU hotplug handling code that may leave
cfs_rq and rt_rq's throttled when tasks are migrated away from a dead
cpu. (Kirill Tkhai)
- Robustify the sched topology setup code. (Peter Zijlstra)
- Improve sched_feat() handling wrt. static_keys (Jason Baron)
- Misc fixes.
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
sched/fair: Fix 'make xmldocs' warning caused by missing description
sched: Use macro for magic number of -1 for setparam
sched: Robustify topology setup
sched: Fix sched_setparam() policy == -1 logic
sched: Allow wait_on_bit_action() functions to support a timeout
sched: Remove proliferation of wait_on_bit() action functions
sched/numa: Revert "Use effective_load() to balance NUMA loads"
sched: Fix static_key race with sched_feat()
sched: Remove extra static_key*() function indirection
sched/rt: Fix replenish_dl_entity() comments to match the current upstream code
sched: Transform resched_task() into resched_curr()
sched/deadline: Kill task_struct->pi_top_task
sched: Rework check_for_tasks()
sched/rt: Enqueue just unthrottled rt_rq back on the stack in __disable_runtime()
sched/fair: Disable runtime_enabled on dying rq
sched/numa: Change scan period code to match intent
sched/numa: Rework best node setting in task_numa_migrate()
sched/numa: Examine a task move when examining a task swap
sched/numa: Simplify task_numa_compare()
sched/numa: Use effective_load() to balance NUMA loads
...
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 20 |
1 files changed, 4 insertions, 16 deletions
diff --git a/mm/filemap.c b/mm/filemap.c index 900edfaf6df5..65d44fd88c78 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -241,18 +241,6 @@ void delete_from_page_cache(struct page *page) | |||
241 | } | 241 | } |
242 | EXPORT_SYMBOL(delete_from_page_cache); | 242 | EXPORT_SYMBOL(delete_from_page_cache); |
243 | 243 | ||
244 | static int sleep_on_page(void *word) | ||
245 | { | ||
246 | io_schedule(); | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | static int sleep_on_page_killable(void *word) | ||
251 | { | ||
252 | sleep_on_page(word); | ||
253 | return fatal_signal_pending(current) ? -EINTR : 0; | ||
254 | } | ||
255 | |||
256 | static int filemap_check_errors(struct address_space *mapping) | 244 | static int filemap_check_errors(struct address_space *mapping) |
257 | { | 245 | { |
258 | int ret = 0; | 246 | int ret = 0; |
@@ -692,7 +680,7 @@ void wait_on_page_bit(struct page *page, int bit_nr) | |||
692 | DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); | 680 | DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); |
693 | 681 | ||
694 | if (test_bit(bit_nr, &page->flags)) | 682 | if (test_bit(bit_nr, &page->flags)) |
695 | __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page, | 683 | __wait_on_bit(page_waitqueue(page), &wait, bit_wait_io, |
696 | TASK_UNINTERRUPTIBLE); | 684 | TASK_UNINTERRUPTIBLE); |
697 | } | 685 | } |
698 | EXPORT_SYMBOL(wait_on_page_bit); | 686 | EXPORT_SYMBOL(wait_on_page_bit); |
@@ -705,7 +693,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr) | |||
705 | return 0; | 693 | return 0; |
706 | 694 | ||
707 | return __wait_on_bit(page_waitqueue(page), &wait, | 695 | return __wait_on_bit(page_waitqueue(page), &wait, |
708 | sleep_on_page_killable, TASK_KILLABLE); | 696 | bit_wait_io, TASK_KILLABLE); |
709 | } | 697 | } |
710 | 698 | ||
711 | /** | 699 | /** |
@@ -806,7 +794,7 @@ void __lock_page(struct page *page) | |||
806 | { | 794 | { |
807 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); | 795 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); |
808 | 796 | ||
809 | __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page, | 797 | __wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io, |
810 | TASK_UNINTERRUPTIBLE); | 798 | TASK_UNINTERRUPTIBLE); |
811 | } | 799 | } |
812 | EXPORT_SYMBOL(__lock_page); | 800 | EXPORT_SYMBOL(__lock_page); |
@@ -816,7 +804,7 @@ int __lock_page_killable(struct page *page) | |||
816 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); | 804 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); |
817 | 805 | ||
818 | return __wait_on_bit_lock(page_waitqueue(page), &wait, | 806 | return __wait_on_bit_lock(page_waitqueue(page), &wait, |
819 | sleep_on_page_killable, TASK_KILLABLE); | 807 | bit_wait_io, TASK_KILLABLE); |
820 | } | 808 | } |
821 | EXPORT_SYMBOL_GPL(__lock_page_killable); | 809 | EXPORT_SYMBOL_GPL(__lock_page_killable); |
822 | 810 | ||