author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-18 14:53:51 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-18 14:53:51 -0400
commit    | d3dc366bbaf07c125561e90d6da4bb147741101a (patch)
tree      | 6eb7e79a8ec9df1fa705393c6d15ccea3d104661 /mm/backing-dev.c
parent    | 511c41d9e6665a07aca94eb00983cf6d77dd87ff (diff)
parent    | e19a8a0ad2d255316830ead05b59c5a704434cbb (diff)
Merge branch 'for-3.18/core' of git://git.kernel.dk/linux-block
Pull core block layer changes from Jens Axboe:
"This is the core block IO pull request for 3.18. Apart from the new
and improved flush machinery for blk-mq, this is all mostly bug fixes
and cleanups.
- blk-mq timeout updates and fixes from Christoph.
- Removal of REQ_END, also from Christoph. We pass it through the
->queue_rq() hook for blk-mq instead (see the first sketch after this
list), freeing up one of the request bits. The space was overly tight
on 32-bit, so Martin also killed REQ_KERNEL since it's no longer used.
- blk integrity updates and fixes from Martin and Gu Zheng.
- Update to the flush machinery for blk-mq from Ming Lei. Now we
have a per hardware context flush request, which both cleans up the
code and should scale better for flush-intensive workloads on blk-mq.
- Improve the error printing, from Rob Elliott.
- Backing device improvements and cleanups from Tejun.
- Fixup of a misplaced rq_complete() tracepoint from Hannes.
- Make blk_get_request() return error pointers, fixing up issues
where we would NULL deref when a device goes bad or missing (see the
second sketch after this list). From Joe Lawrence.
- Prep work for drastically reducing the memory consumption of dm
devices from Junichi Nomura. This allows creating clone bio sets
without preallocating a lot of memory (see the third sketch after
this list).
- Fix a blk-mq hang on certain combinations of queue depths and
hardware queues from me.
- Limit memory consumption for blk-mq devices for crash dump
scenarios and drivers that use crazy high depths (certain SCSI
shared tag setups). We now just use a single queue and limited
depth for that"
* 'for-3.18/core' of git://git.kernel.dk/linux-block: (58 commits)
block: Remove REQ_KERNEL
blk-mq: allocate cpumask on the home node
bio-integrity: remove the needless fail handle of bip_slab creating
block: include func name in __get_request prints
block: make blk_update_request print prefix match ratelimited prefix
blk-merge: don't compute bi_phys_segments from bi_vcnt for cloned bio
block: fix alignment_offset math that assumes io_min is a power-of-2
blk-mq: Make bt_clear_tag() easier to read
blk-mq: fix potential hang if rolling wakeup depth is too high
block: add bioset_create_nobvec()
block: use bio_clone_fast() in blk_rq_prep_clone()
block: misplaced rq_complete tracepoint
sd: Honor block layer integrity handling flags
block: Replace strnicmp with strncasecmp
block: Add T10 Protection Information functions
block: Don't merge requests if integrity flags differ
block: Integrity checksum flag
block: Relocate bio integrity flags
block: Add a disk flag to block integrity profile
block: Add prefix to block integrity profile flags
...
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r-- | mm/backing-dev.c | 40
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 12a992b62576..0ae0df55000b 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -40,7 +40,7 @@ LIST_HEAD(bdi_list);
 /* bdi_wq serves all asynchronous writeback tasks */
 struct workqueue_struct *bdi_wq;
 
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
 {
 	if (wb1 < wb2) {
 		spin_lock(&wb1->list_lock);
@@ -376,13 +376,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 	flush_delayed_work(&bdi->wb.dwork);
 	WARN_ON(!list_empty(&bdi->work_list));
-
-	/*
-	 * This shouldn't be necessary unless @bdi for some reason has
-	 * unflushed dirty IO after work_list is drained. Do it anyway
-	 * just in case.
-	 */
-	cancel_delayed_work_sync(&bdi->wb.dwork);
+	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 }
 
 /*
@@ -402,21 +396,15 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 
 void bdi_unregister(struct backing_dev_info *bdi)
 {
-	struct device *dev = bdi->dev;
-
-	if (dev) {
+	if (bdi->dev) {
 		bdi_set_min_ratio(bdi, 0);
 		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
 
 		bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
-
-		spin_lock_bh(&bdi->wb_lock);
+		device_unregister(bdi->dev);
 		bdi->dev = NULL;
-		spin_unlock_bh(&bdi->wb_lock);
-
-		device_unregister(dev);
 	}
 }
 EXPORT_SYMBOL(bdi_unregister);
@@ -487,8 +475,17 @@ void bdi_destroy(struct backing_dev_info *bdi)
 	int i;
 
 	/*
-	 * Splice our entries to the default_backing_dev_info, if this
-	 * bdi disappears
+	 * Splice our entries to the default_backing_dev_info. This
+	 * condition shouldn't happen. @wb must be empty at this point and
+	 * dirty inodes on it might cause other issues. This workaround is
+	 * added by ce5f8e779519 ("writeback: splice dirty inode entries to
+	 * default bdi on bdi_destroy()") without root-causing the issue.
+	 *
+	 * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
+	 * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
+	 *
+	 * We should probably add WARN_ON() to find out whether it still
+	 * happens and track it down if so.
 	 */
 	if (bdi_has_dirty_io(bdi)) {
 		struct bdi_writeback *dst = &default_backing_dev_info.wb;
@@ -503,12 +500,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
 	bdi_unregister(bdi);
 
-	/*
-	 * If bdi_unregister() had already been called earlier, the dwork
-	 * could still be pending because bdi_prune_sb() can race with the
-	 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
-	 */
-	cancel_delayed_work_sync(&bdi->wb.dwork);
+	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
 		percpu_counter_destroy(&bdi->bdi_stat[i]);