aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-12-13 17:14:23 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-13 17:14:23 -0500
commitcaf292ae5bb9d57198ce001d8b762f7abae3a94d (patch)
tree5fd5d6d971503818ab2824407134cf36a80c53d0 /fs
parent8f4385d590d4296ec38e228d17b1d002f6031dd2 (diff)
parentfcbf6a087a7e4d3f03d28333678a1010810a53c3 (diff)
Merge branch 'for-3.19/core' of git://git.kernel.dk/linux-block
Pull block driver core update from Jens Axboe: "This is the pull request for the core block IO changes for 3.19. Not a huge round this time, mostly lots of little good fixes: - Fix a bug in sysfs blktrace interface causing a NULL pointer dereference, when enabled/disabled through that API. From Arianna Avanzini. - Various updates/fixes/improvements for blk-mq: - A set of updates from Bart, mostly fixing bugs in the tag handling. - Cleanup/code consolidation from Christoph. - Extend queue_rq API to be able to handle batching issues of IO requests. NVMe will utilize this shortly. From me. - A few tag and request handling updates from me. - Cleanup of the preempt handling for running queues from Paolo. - Prevent running of unmapped hardware queues from Ming Lei. - Move the kdump memory limiting check to be in the correct location, from Shaohua. - Initialize all software queues at init time from Takashi. This prevents a kobject warning when CPUs are brought online that weren't online when a queue was registered. - Single writeback fix for I_DIRTY clearing from Tejun. Queued with the core IO changes, since it's just a single fix. - Version X of the __bio_add_page() segment addition retry from Maurizio. Hope the Xth time is the charm. - Documentation fixup for IO scheduler merging from Jan. - Introduce (and use) generic IO stat accounting helpers for non-rq drivers, from Gu Zheng.
- Kill off artificial limiting of max sectors in a request from Christoph" * 'for-3.19/core' of git://git.kernel.dk/linux-block: (26 commits) bio: modify __bio_add_page() to accept pages that don't start a new segment blk-mq: Fix uninitialized kobject at CPU hotplugging blktrace: don't let the sysfs interface remove trace from running list blk-mq: Use all available hardware queues blk-mq: Micro-optimize bt_get() blk-mq: Fix a race between bt_clear_tag() and bt_get() blk-mq: Avoid that __bt_get_word() wraps multiple times blk-mq: Fix a use-after-free blk-mq: prevent unmapped hw queue from being scheduled blk-mq: re-check for available tags after running the hardware queue blk-mq: fix hang in bt_get() blk-mq: move the kdump check to blk_mq_alloc_tag_set blk-mq: cleanup tag free handling blk-mq: use 'nr_cpu_ids' as highest CPU ID count for hwq <-> cpu map blk: introduce generic io stat accounting help function blk-mq: handle the single queue case in blk_mq_hctx_next_cpu genhd: check for int overflow in disk_expand_part_tbl() blk-mq: add blk_mq_free_hctx_request() blk-mq: export blk_mq_free_request() blk-mq: use get_cpu/put_cpu instead of preempt_disable/preempt_enable ...
Diffstat (limited to 'fs')
-rw-r--r--fs/fs-writeback.c29
1 files changed, 22 insertions, 7 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ef9bef118342..2d609a5fbfea 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -479,12 +479,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
479 * write_inode() 479 * write_inode()
480 */ 480 */
481 spin_lock(&inode->i_lock); 481 spin_lock(&inode->i_lock);
482 /* Clear I_DIRTY_PAGES if we've written out all dirty pages */ 482
483 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
484 inode->i_state &= ~I_DIRTY_PAGES;
485 dirty = inode->i_state & I_DIRTY; 483 dirty = inode->i_state & I_DIRTY;
486 inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC); 484 inode->i_state &= ~I_DIRTY;
485
486 /*
487 * Paired with smp_mb() in __mark_inode_dirty(). This allows
488 * __mark_inode_dirty() to test i_state without grabbing i_lock -
489 * either they see the I_DIRTY bits cleared or we see the dirtied
490 * inode.
491 *
492 * I_DIRTY_PAGES is always cleared together above even if @mapping
493 * still has dirty pages. The flag is reinstated after smp_mb() if
494 * necessary. This guarantees that either __mark_inode_dirty()
495 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
496 */
497 smp_mb();
498
499 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
500 inode->i_state |= I_DIRTY_PAGES;
501
487 spin_unlock(&inode->i_lock); 502 spin_unlock(&inode->i_lock);
503
488 /* Don't write the inode if only I_DIRTY_PAGES was set */ 504 /* Don't write the inode if only I_DIRTY_PAGES was set */
489 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { 505 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
490 int err = write_inode(inode, wbc); 506 int err = write_inode(inode, wbc);
@@ -1148,12 +1164,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
1148 } 1164 }
1149 1165
1150 /* 1166 /*
1151 * make sure that changes are seen by all cpus before we test i_state 1167 * Paired with smp_mb() in __writeback_single_inode() for the
1152 * -- mikulas 1168 * following lockless i_state test. See there for details.
1153 */ 1169 */
1154 smp_mb(); 1170 smp_mb();
1155 1171
1156 /* avoid the locking if we can */
1157 if ((inode->i_state & flags) == flags) 1172 if ((inode->i_state & flags) == flags)
1158 return; 1173 return;
1159 1174