diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 19:23:30 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 19:23:30 -0400 |
commit | 98959948a7ba33cf8c708626e0d2a1456397e1c6 (patch) | |
tree | 8ba9b6c2679a06e89f23bdd7018e9bb0249e3bda /drivers/md | |
parent | ef35ad26f8ff44d2c93e29952cdb336bda729d9d (diff) | |
parent | cd3bd4e628a6d57d66afe77835fe8d93ae3e41f8 (diff) |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Move the nohz kick code out of the scheduler tick to a dedicated IPI,
from Frederic Weisbecker.
This necessitated quite some background infrastructure rework,
including:
* Clean up some irq-work internals
* Implement remote irq-work
* Implement nohz kick on top of remote irq-work
* Move full dynticks timer enqueue notification to new kick
* Move multi-task notification to new kick
* Remove unnecessary barriers on multi-task notification
- Remove proliferation of wait_on_bit() action functions and allow
wait_on_bit_action() functions to support a timeout. (Neil Brown)
- Another round of sched/numa improvements, cleanups and fixes. (Rik
van Riel)
- Implement fast idling of CPUs when the system is partially loaded,
for better scalability. (Tim Chen)
- Restructure and fix the CPU hotplug handling code that may leave
cfs_rq and rt_rq's throttled when tasks are migrated away from a dead
cpu. (Kirill Tkhai)
- Robustify the sched topology setup code. (Peter Zijlstra)
- Improve sched_feat() handling wrt. static_keys (Jason Baron)
- Misc fixes.
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
sched/fair: Fix 'make xmldocs' warning caused by missing description
sched: Use macro for magic number of -1 for setparam
sched: Robustify topology setup
sched: Fix sched_setparam() policy == -1 logic
sched: Allow wait_on_bit_action() functions to support a timeout
sched: Remove proliferation of wait_on_bit() action functions
sched/numa: Revert "Use effective_load() to balance NUMA loads"
sched: Fix static_key race with sched_feat()
sched: Remove extra static_key*() function indirection
sched/rt: Fix replenish_dl_entity() comments to match the current upstream code
sched: Transform resched_task() into resched_curr()
sched/deadline: Kill task_struct->pi_top_task
sched: Rework check_for_tasks()
sched/rt: Enqueue just unthrottled rt_rq back on the stack in __disable_runtime()
sched/fair: Disable runtime_enabled on dying rq
sched/numa: Change scan period code to match intent
sched/numa: Rework best node setting in task_numa_migrate()
sched/numa: Examine a task move when examining a task swap
sched/numa: Simplify task_numa_compare()
sched/numa: Use effective_load() to balance NUMA loads
...
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-bufio.c | 41 | ||||
-rw-r--r-- | drivers/md/dm-snap.c | 10 |
2 files changed, 15 insertions, 36 deletions
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d724459860d9..ab472c557d18 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -615,16 +615,6 @@ static void write_endio(struct bio *bio, int error) | |||
615 | } | 615 | } |
616 | 616 | ||
617 | /* | 617 | /* |
618 | * This function is called when wait_on_bit is actually waiting. | ||
619 | */ | ||
620 | static int do_io_schedule(void *word) | ||
621 | { | ||
622 | io_schedule(); | ||
623 | |||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | /* | ||
628 | * Initiate a write on a dirty buffer, but don't wait for it. | 618 | * Initiate a write on a dirty buffer, but don't wait for it. |
629 | * | 619 | * |
630 | * - If the buffer is not dirty, exit. | 620 | * - If the buffer is not dirty, exit. |
@@ -640,8 +630,7 @@ static void __write_dirty_buffer(struct dm_buffer *b, | |||
640 | return; | 630 | return; |
641 | 631 | ||
642 | clear_bit(B_DIRTY, &b->state); | 632 | clear_bit(B_DIRTY, &b->state); |
643 | wait_on_bit_lock(&b->state, B_WRITING, | 633 | wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); |
644 | do_io_schedule, TASK_UNINTERRUPTIBLE); | ||
645 | 634 | ||
646 | if (!write_list) | 635 | if (!write_list) |
647 | submit_io(b, WRITE, b->block, write_endio); | 636 | submit_io(b, WRITE, b->block, write_endio); |
@@ -675,9 +664,9 @@ static void __make_buffer_clean(struct dm_buffer *b) | |||
675 | if (!b->state) /* fast case */ | 664 | if (!b->state) /* fast case */ |
676 | return; | 665 | return; |
677 | 666 | ||
678 | wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE); | 667 | wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); |
679 | __write_dirty_buffer(b, NULL); | 668 | __write_dirty_buffer(b, NULL); |
680 | wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE); | 669 | wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); |
681 | } | 670 | } |
682 | 671 | ||
683 | /* | 672 | /* |
@@ -1030,7 +1019,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block, | |||
1030 | if (need_submit) | 1019 | if (need_submit) |
1031 | submit_io(b, READ, b->block, read_endio); | 1020 | submit_io(b, READ, b->block, read_endio); |
1032 | 1021 | ||
1033 | wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE); | 1022 | wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); |
1034 | 1023 | ||
1035 | if (b->read_error) { | 1024 | if (b->read_error) { |
1036 | int error = b->read_error; | 1025 | int error = b->read_error; |
@@ -1209,15 +1198,13 @@ again: | |||
1209 | dropped_lock = 1; | 1198 | dropped_lock = 1; |
1210 | b->hold_count++; | 1199 | b->hold_count++; |
1211 | dm_bufio_unlock(c); | 1200 | dm_bufio_unlock(c); |
1212 | wait_on_bit(&b->state, B_WRITING, | 1201 | wait_on_bit_io(&b->state, B_WRITING, |
1213 | do_io_schedule, | 1202 | TASK_UNINTERRUPTIBLE); |
1214 | TASK_UNINTERRUPTIBLE); | ||
1215 | dm_bufio_lock(c); | 1203 | dm_bufio_lock(c); |
1216 | b->hold_count--; | 1204 | b->hold_count--; |
1217 | } else | 1205 | } else |
1218 | wait_on_bit(&b->state, B_WRITING, | 1206 | wait_on_bit_io(&b->state, B_WRITING, |
1219 | do_io_schedule, | 1207 | TASK_UNINTERRUPTIBLE); |
1220 | TASK_UNINTERRUPTIBLE); | ||
1221 | } | 1208 | } |
1222 | 1209 | ||
1223 | if (!test_bit(B_DIRTY, &b->state) && | 1210 | if (!test_bit(B_DIRTY, &b->state) && |
@@ -1321,15 +1308,15 @@ retry: | |||
1321 | 1308 | ||
1322 | __write_dirty_buffer(b, NULL); | 1309 | __write_dirty_buffer(b, NULL); |
1323 | if (b->hold_count == 1) { | 1310 | if (b->hold_count == 1) { |
1324 | wait_on_bit(&b->state, B_WRITING, | 1311 | wait_on_bit_io(&b->state, B_WRITING, |
1325 | do_io_schedule, TASK_UNINTERRUPTIBLE); | 1312 | TASK_UNINTERRUPTIBLE); |
1326 | set_bit(B_DIRTY, &b->state); | 1313 | set_bit(B_DIRTY, &b->state); |
1327 | __unlink_buffer(b); | 1314 | __unlink_buffer(b); |
1328 | __link_buffer(b, new_block, LIST_DIRTY); | 1315 | __link_buffer(b, new_block, LIST_DIRTY); |
1329 | } else { | 1316 | } else { |
1330 | sector_t old_block; | 1317 | sector_t old_block; |
1331 | wait_on_bit_lock(&b->state, B_WRITING, | 1318 | wait_on_bit_lock_io(&b->state, B_WRITING, |
1332 | do_io_schedule, TASK_UNINTERRUPTIBLE); | 1319 | TASK_UNINTERRUPTIBLE); |
1333 | /* | 1320 | /* |
1334 | * Relink buffer to "new_block" so that write_callback | 1321 | * Relink buffer to "new_block" so that write_callback |
1335 | * sees "new_block" as a block number. | 1322 | * sees "new_block" as a block number. |
@@ -1341,8 +1328,8 @@ retry: | |||
1341 | __unlink_buffer(b); | 1328 | __unlink_buffer(b); |
1342 | __link_buffer(b, new_block, b->list_mode); | 1329 | __link_buffer(b, new_block, b->list_mode); |
1343 | submit_io(b, WRITE, new_block, write_endio); | 1330 | submit_io(b, WRITE, new_block, write_endio); |
1344 | wait_on_bit(&b->state, B_WRITING, | 1331 | wait_on_bit_io(&b->state, B_WRITING, |
1345 | do_io_schedule, TASK_UNINTERRUPTIBLE); | 1332 | TASK_UNINTERRUPTIBLE); |
1346 | __unlink_buffer(b); | 1333 | __unlink_buffer(b); |
1347 | __link_buffer(b, old_block, b->list_mode); | 1334 | __link_buffer(b, old_block, b->list_mode); |
1348 | } | 1335 | } |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 5bd2290cfb1e..864b03f47727 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1032,21 +1032,13 @@ static void start_merge(struct dm_snapshot *s) | |||
1032 | snapshot_merge_next_chunks(s); | 1032 | snapshot_merge_next_chunks(s); |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | static int wait_schedule(void *ptr) | ||
1036 | { | ||
1037 | schedule(); | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
1042 | /* | 1035 | /* |
1043 | * Stop the merging process and wait until it finishes. | 1036 | * Stop the merging process and wait until it finishes. |
1044 | */ | 1037 | */ |
1045 | static void stop_merge(struct dm_snapshot *s) | 1038 | static void stop_merge(struct dm_snapshot *s) |
1046 | { | 1039 | { |
1047 | set_bit(SHUTDOWN_MERGE, &s->state_bits); | 1040 | set_bit(SHUTDOWN_MERGE, &s->state_bits); |
1048 | wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule, | 1041 | wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE); |
1049 | TASK_UNINTERRUPTIBLE); | ||
1050 | clear_bit(SHUTDOWN_MERGE, &s->state_bits); | 1042 | clear_bit(SHUTDOWN_MERGE, &s->state_bits); |
1051 | } | 1043 | } |
1052 | 1044 | ||