author		Linus Torvalds <torvalds@linux-foundation.org>	2015-10-23 18:20:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-10-23 18:20:57 -0400
commit		ea1ee5ff1b500ccdc64782ecef13d276afb08f14 (patch)
tree		085cb2fc7a5b4b4395f1f13cb05d989aa18bce8e /fs
parent		ef594c421a0f96197f28d205e2ee58a83c1e0e37 (diff)
parent		e27c5b9d23168cc2cb8fec147ae7ed1f7a2005c3 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "A final set of fixes for 4.3.

  It is (again) bigger than I would have liked, but it's all been
  through the testing mill and has been carefully reviewed by multiple
  parties.  Each fix is either a regression fix for this cycle, or is
  marked stable.  You can scold me at KS.  The pull request contains:

   - Three simple fixes for NVMe, fixing regressions since 4.3.  From
     Arnd, Christoph, and Keith.

   - A single xen-blkfront fix from Cathy, fixing a NULL dereference if
     an error is returned through the state change callback.

   - Fixup for some bad/sloppy code in nbd that got introduced earlier
     in this cycle.  From Markus Pargmann.

   - A blk-mq tagset use-after-free fix from Junichi.

   - A backing device lifetime fix from Tejun, fixing a crash.

   - And finally, a set of regression/stable fixes for cgroup writeback
     from Tejun"

* 'for-linus' of git://git.kernel.dk/linux-block:
  writeback: remove broken rbtree_postorder_for_each_entry_safe() usage in cgwb_bdi_destroy()
  NVMe: Fix memory leak on retried commands
  block: don't release bdi while request_queue has live references
  nvme: use an integer value to Linux errno values
  blk-mq: fix use-after-free in blk_mq_free_tag_set()
  nvme: fix 32-bit build warning
  writeback: fix incorrect calculation of available memory for memcg domains
  writeback: memcg dirty_throttle_control should be initialized with wb->memcg_completions
  writeback: bdi_writeback iteration must not skip dying ones
  writeback: fix bdi_writeback iteration in wakeup_dirtytime_writeback()
  writeback: laptop_mode_timer_fn() needs rcu_read_lock() around bdi_writeback iteration
  nbd: Add locking for tasks
  xen-blkfront: check for null drvdata in blkback_changed (XenbusStateClosing)
Diffstat (limited to 'fs')
-rw-r--r--	fs/fs-writeback.c | 35 ++++++++++++++++++++++++-----------
1 file changed, 24 insertions(+), 11 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 091a36444972..29e4599f6fc1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -778,19 +778,24 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 					 struct wb_writeback_work *base_work,
 					 bool skip_if_busy)
 {
-	int next_memcg_id = 0;
-	struct bdi_writeback *wb;
-	struct wb_iter iter;
+	struct bdi_writeback *last_wb = NULL;
+	struct bdi_writeback *wb = list_entry_rcu(&bdi->wb_list,
+						struct bdi_writeback, bdi_node);
 
 	might_sleep();
 restart:
 	rcu_read_lock();
-	bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) {
+	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
 		DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
 		struct wb_writeback_work fallback_work;
 		struct wb_writeback_work *work;
 		long nr_pages;
 
+		if (last_wb) {
+			wb_put(last_wb);
+			last_wb = NULL;
+		}
+
 		/* SYNC_ALL writes out I_DIRTY_TIME too */
 		if (!wb_has_dirty_io(wb) &&
 		    (base_work->sync_mode == WB_SYNC_NONE ||
@@ -819,12 +824,22 @@ restart:
 
 		wb_queue_work(wb, work);
 
-		next_memcg_id = wb->memcg_css->id + 1;
+		/*
+		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
+		 * continuing iteration from @wb after dropping and
+		 * regrabbing rcu read lock.
+		 */
+		wb_get(wb);
+		last_wb = wb;
+
 		rcu_read_unlock();
 		wb_wait_for_completion(bdi, &fallback_work_done);
 		goto restart;
 	}
 	rcu_read_unlock();
+
+	if (last_wb)
+		wb_put(last_wb);
 }
 
 #else	/* CONFIG_CGROUP_WRITEBACK */
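
The first two hunks replace the old wb_iter cursor (which resumed by memcg ID and, per the "must not skip dying ones" entry in the shortlog above, could skip writebacks being destroyed) with a pin-and-resume walk: wb_wait_for_completion() can sleep, so the RCU read lock has to be dropped mid-iteration, and holding a reference on the current element keeps it linked on @bdi->wb_list so the walk can continue from the same position. A minimal sketch of that pattern follows; "item" and the item_get()/item_put()/work helpers are hypothetical stand-ins for illustration, not kernel APIs.

	/*
	 * Sketch only: resumable RCU list walk with reference pinning.
	 * item_get()/item_put(), needs_blocking_work() and
	 * do_blocking_work() are hypothetical helpers.
	 */
	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct item {
		struct list_head node;
	};

	/* Hypothetical; a real user supplies these. */
	void item_get(struct item *it);		/* pin: keeps @it on the list */
	void item_put(struct item *it);		/* unpin; last put frees via RCU */
	bool needs_blocking_work(struct item *it);
	void do_blocking_work(struct item *it);	/* may sleep */

	static void walk_items(struct list_head *head)
	{
		struct item *last = NULL;
		/* Point at the head itself; _continue_ starts at the next entry. */
		struct item *it = list_entry_rcu(head, struct item, node);

	restart:
		rcu_read_lock();
		list_for_each_entry_continue_rcu(it, head, node) {
			if (last) {		/* drop the pin from the last pass */
				item_put(last);
				last = NULL;
			}
			if (!needs_blocking_work(it))
				continue;
			item_get(it);		/* pin @it across the sleep */
			last = it;
			rcu_read_unlock();
			do_blocking_work(it);	/* safe to sleep here */
			goto restart;		/* resume right after @it */
		}
		rcu_read_unlock();
		if (last)
			item_put(last);
	}
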
@@ -1857,12 +1872,11 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		struct bdi_writeback *wb;
-		struct wb_iter iter;
 
 		if (!bdi_has_dirty_io(bdi))
 			continue;
 
-		bdi_for_each_wb(wb, bdi, &iter, 0)
+		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
 			wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
 					   false, reason);
 	}
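
By contrast, this hunk needs no pinning: wb_start_writeback() is invoked without ever dropping the RCU read lock, so a plain RCU walk suffices. The same shape in sketch form, reusing the hypothetical names from above:

	/* Sketch only: plain RCU walk when the loop body never sleeps. */
	rcu_read_lock();
	list_for_each_entry_rcu(it, head, node)
		do_nonblocking_work(it);	/* hypothetical; must not sleep */
	rcu_read_unlock();
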
@@ -1894,11 +1908,10 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		struct bdi_writeback *wb;
-		struct wb_iter iter;
 
-		bdi_for_each_wb(wb, bdi, &iter, 0)
-			if (!list_empty(&bdi->wb.b_dirty_time))
-				wb_wakeup(&bdi->wb);
+		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
+			if (!list_empty(&wb->b_dirty_time))
+				wb_wakeup(wb);
 	}
 	rcu_read_unlock();
 	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
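
Beyond the iterator conversion, this last hunk also fixes a logic bug: the old body tested bdi->wb.b_dirty_time and woke bdi->wb (the bdi's root writeback) on every pass, regardless of which member the iterator was visiting, so per-memcg writebacks holding dirty-time inodes were never woken. The new body acts on the iterated wb itself; this is the "fix bdi_writeback iteration in wakeup_dirtytime_writeback()" entry in the shortlog above.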