author     Linus Torvalds <torvalds@linux-foundation.org>   2013-07-11 16:03:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-07-11 16:03:24 -0400
commit     36805aaea5ae3cf1bb32f1643e0a800bb69f0d5b
tree       5565132549a0733772b3a2ac6b5cda516ea8cdce /fs/block_dev.c
parent     6d2fa9e141ea56a571ec842fd4f3a86bea44a203
parent     d50235b7bc3ee0a0427984d763ea7534149531b4
Merge branch 'for-3.11/core' of git://git.kernel.dk/linux-block
Pull core block IO updates from Jens Axboe:
"Here are the core IO block bits for 3.11. It contains:
- A tweak to the reserved tag logic from Jan, for weirdo devices with
just 3 free tags. For those it improves random writes substantially
(see the sketch after this message).
- Periodic writeback fix from Jan. Marked for stable as well.
- Fix for a race condition in IO scheduler switching from Jianpeng.
- The hierarchical blk-cgroup support from Tejun. This is the grunt
of the series.
- blk-throttle fix from Vivek.
Just a note that I'm in the middle of a relocation; the whole family is
flying out tomorrow. Hence I will be AWOL for the remainder of this week,
but back at work again on Monday the 15th. CC'ing Tejun, since any
potential "surprises" will most likely be from the blk-cgroup work.
But it's been brewing for a while and sitting in my tree and
linux-next for a long time, so should be solid."
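For context on the first bullet, here is a minimal sketch of the idea, pieced
together from the commit subject below ("Reserve only one queue tag for sync
IO if only 3 tags are available"). async_tag_limit() is a hypothetical helper,
not the in-tree function, and the actual kernel logic may differ. Normally the
last two queue tags are held back for sync IO; on a device with only 3 tags
that would leave async writes a queue depth of one, so the tweak holds back
just one tag there:

	/*
	 * Hypothetical sketch, not the kernel function: how many tags
	 * async IO may consume before the rest are reserved for sync.
	 */
	static int async_tag_limit(int max_depth)
	{
		if (max_depth <= 1)
			return max_depth;	/* too few tags to reserve any */
		if (max_depth <= 3)
			return max_depth - 1;	/* reserve one tag for sync IO */
		return max_depth - 2;		/* reserve two tags for sync IO */
	}

With 3 tags this lets random (async) writes run at a depth of 2 instead of 1,
which is where the substantial improvement comes from.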
* 'for-3.11/core' of git://git.kernel.dk/linux-block: (36 commits)
elevator: Fix a race in elevator switching
block: Reserve only one queue tag for sync IO if only 3 tags are available
writeback: Fix periodic writeback after fs mount
blk-throttle: implement proper hierarchy support
blk-throttle: implement throtl_grp->has_rules[]
blk-throttle: Account for child group's start time in parent while bio climbs up
blk-throttle: add throtl_qnode for dispatch fairness
blk-throttle: make throtl_pending_timer_fn() ready for hierarchy
blk-throttle: make tg_dispatch_one_bio() ready for hierarchy
blk-throttle: make blk_throtl_bio() ready for hierarchy
blk-throttle: make blk_throtl_drain() ready for hierarchy
blk-throttle: dispatch from throtl_pending_timer_fn()
blk-throttle: implement dispatch looping
blk-throttle: separate out throtl_service_queue->pending_timer from throtl_data->dispatch_work
blk-throttle: set REQ_THROTTLED from throtl_charge_bio() and gate stats update with it
blk-throttle: implement sq_to_tg(), sq_to_td() and throtl_log()
blk-throttle: add throtl_service_queue->parent_sq
blk-throttle: generalize update_disptime optimization in blk_throtl_bio()
blk-throttle: dispatch to throtl_data->service_queue.bio_lists[]
blk-throttle: move bio_lists[] and friends to throtl_service_queue
...
Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--	fs/block_dev.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bb43ce081d6e..c7bda5cd3da7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -58,17 +58,24 @@ static void bdev_inode_switch_bdi(struct inode *inode,
 			struct backing_dev_info *dst)
 {
 	struct backing_dev_info *old = inode->i_data.backing_dev_info;
+	bool wakeup_bdi = false;
 
 	if (unlikely(dst == old))		/* deadlock avoidance */
 		return;
 	bdi_lock_two(&old->wb, &dst->wb);
 	spin_lock(&inode->i_lock);
 	inode->i_data.backing_dev_info = dst;
-	if (inode->i_state & I_DIRTY)
+	if (inode->i_state & I_DIRTY) {
+		if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
+			wakeup_bdi = true;
 		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+	}
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&old->wb.list_lock);
 	spin_unlock(&dst->wb.list_lock);
+
+	if (wakeup_bdi)
+		bdi_wakeup_thread_delayed(dst);
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
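The change above decides, while still holding the inode and writeback list
locks, whether the destination bdi's flusher needs a kick (the inode is dirty
and dst->wb had no dirty IO yet), and issues the wakeup only after all locks
are dropped. Below is a minimal user-space sketch of that decide-under-lock,
signal-after-unlock pattern; struct flusher and flusher_add_dirty() are
hypothetical stand-ins for the bdi flusher machinery, with pthreads used
purely for illustration:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct flusher {
		pthread_mutex_t lock;
		pthread_cond_t  wake;
		int             ndirty;		/* dirty items queued for this flusher */
	};

	static void flusher_add_dirty(struct flusher *dst)
	{
		bool wakeup = false;

		pthread_mutex_lock(&dst->lock);
		/* Decide under the lock: wake only on the 0 -> 1 transition,
		 * analogous to the !wb_has_dirty_io(&dst->wb) check above. */
		if (dst->ndirty++ == 0)
			wakeup = true;
		pthread_mutex_unlock(&dst->lock);

		/* Signal outside the critical section, as the patch calls
		 * bdi_wakeup_thread_delayed() only after the spin_unlock()s. */
		if (wakeup)
			pthread_cond_signal(&dst->wake);
	}

	int main(void)
	{
		struct flusher f = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.wake = PTHREAD_COND_INITIALIZER,
		};

		flusher_add_dirty(&f);	/* first dirty item: flusher gets woken */
		flusher_add_dirty(&f);	/* already has dirty IO: no extra wakeup */
		printf("dirty items queued: %d\n", f.ndirty);
		return 0;
	}

Keeping the wakeup outside the locked region avoids waking a thread that
would immediately contend on the very locks the caller still holds.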