author		Chris Mason <clm@fb.com>	2015-09-18 13:35:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-19 21:50:19 -0400
commit		590dca3a71875461e8fea3013af74386945191b2 (patch)
tree		9bd3b4a9de42fb0547fce6fb1f336af3d89e86d8
parent		00ade1f553e3b947cd26228392ee47d6f0f550e1 (diff)
fs-writeback: unplug before cond_resched in writeback_sb_inodes
Commit 505a666ee3fc ("writeback: plug writeback in wb_writeback() and writeback_inodes_wb()") has us holding a plug during writeback_sb_inodes, which increases the merge rate when relatively contiguous small files are written by the filesystem. It helps both on flash and spindles.

For an fs_mark workload creating 4K files in parallel across 8 drives, this commit improves performance ~9% more by unplugging before calling cond_resched(). cond_resched() doesn't trigger an implicit unplug, so explicitly getting the IO down to the device before scheduling reduces latencies for anyone waiting on clean pages. It also cuts down on how often we use kblockd to unplug, which means less work bouncing from one workqueue to another.

Many more details about how we got here: https://lkml.org/lkml/2015/9/11/570

Signed-off-by: Chris Mason <clm@fb.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
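For context on what the plug is doing here, below is a minimal sketch (not taken from the kernel sources) of the block-layer plugging pattern this patch leans on: blk_start_plug()/blk_finish_plug() batch bios in the task's plug list so adjacent requests can merge, and blk_flush_plug(current) pushes the batched IO to the device before the task gives up the CPU. The names inode_batch, more_inodes() and submit_one_inode() are hypothetical stand-ins for the per-inode work actually done in writeback_sb_inodes().

/*
 * Illustrative sketch only; inode_batch, more_inodes() and
 * submit_one_inode() are hypothetical helpers standing in for the real
 * per-inode writeback loop in writeback_sb_inodes().
 */
#include <linux/blkdev.h>
#include <linux/sched.h>

static void writeback_batch_sketch(struct inode_batch *batch)
{
	struct blk_plug plug;

	blk_start_plug(&plug);			/* start batching bios in current->plug */
	while (more_inodes(batch)) {
		submit_one_inode(batch);	/* contiguous small files merge nicely here */

		if (need_resched()) {
			/*
			 * cond_resched() alone leaves the plugged IO sitting
			 * in the plug list; flush it first so anyone
			 * throttling in balance_dirty_pages() sees progress.
			 */
			blk_flush_plug(current);	/* signature as used in this patch */
			cond_resched();
		}
	}
	blk_finish_plug(&plug);			/* final unplug submits anything left */
}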
-rw-r--r--	fs/fs-writeback.c	17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 587ac08eabb6..091a36444972 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1481,6 +1481,21 @@ static long writeback_sb_inodes(struct super_block *sb,
 		wbc_detach_inode(&wbc);
 		work->nr_pages -= write_chunk - wbc.nr_to_write;
 		wrote += write_chunk - wbc.nr_to_write;
+
+		if (need_resched()) {
+			/*
+			 * We're trying to balance between building up a nice
+			 * long list of IOs to improve our merge rate, and
+			 * getting those IOs out quickly for anyone throttling
+			 * in balance_dirty_pages(). cond_resched() doesn't
+			 * unplug, so get our IOs out the door before we
+			 * give up the CPU.
+			 */
+			blk_flush_plug(current);
+			cond_resched();
+		}
+
+
 		spin_lock(&wb->list_lock);
 		spin_lock(&inode->i_lock);
 		if (!(inode->i_state & I_DIRTY_ALL))
@@ -1488,7 +1503,7 @@ static long writeback_sb_inodes(struct super_block *sb,
 		requeue_inode(inode, wb, &wbc);
 		inode_sync_complete(inode);
 		spin_unlock(&inode->i_lock);
-		cond_resched_lock(&wb->list_lock);
+
 		/*
 		 * bail out to wb_writeback() often enough to check
 		 * background threshold and other termination conditions.