| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-09-29 17:52:14 -0400 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-09-29 17:52:14 -0400 |
| commit | 291d0e5d81e101392379217b06251fe8c27f1f80 (patch) | |
| tree | 25a790625bf5a763f96956710ce8e7c9589cab88 | |
| parent | e75417739b1de4f6eb99f3f080c67bfd6812d562 (diff) | |
| parent | 133424a207774d3d32a38d560c6469ed31c0472f (diff) | |
Merge tag 'for-linus-20180929' of git://git.kernel.dk/linux-block
Jens writes:
"Block fixes for 4.19-rc6
A set of fixes that should go into this release. This pull request
contains:
- A fix (hopefully) for the persistent grants for xen-blkfront. A
previous fix from this series wasn't complete, hence reverted, and
this one should hopefully be it. (Boris Ostrovsky)
- Fix for an elevator drain warning with SMR devices, which is
triggered when you switch schedulers (Damien)
- bcache deadlock fix (Guoju Fang)
- Fix for the block unplug tracepoint, which has had the
timer/explicit flag reverted since 4.11 (Ilya)
- Fix a regression in this series where the blk-mq timeout hook is
invoked with the RCU read lock held, hence preventing it from
blocking (Keith)
- NVMe pull from Christoph, with a single multipath fix (Susobhan Dey)"
* tag 'for-linus-20180929' of git://git.kernel.dk/linux-block:
xen/blkfront: correct purging of persistent grants
Revert "xen/blkfront: When purging persistent grants, keep them in the buffer"
blk-mq: I/O and timer unplugs are inverted in blktrace
bcache: add separate workqueue for journal_write to avoid deadlock
xen/blkfront: When purging persistent grants, keep them in the buffer
block: fix deadline elevator drain for zoned block devices
blk-mq: Allow blocking queue tag iter callbacks
nvme: properly propagate errors in nvme_mpath_init
```
 block/blk-mq-tag.c            | 13 ++++---------
 block/blk-mq.c                |  4 ++--
 block/elevator.c              |  2 +-
 drivers/block/xen-blkfront.c  |  4 ++--
 drivers/md/bcache/bcache.h    |  1 +
 drivers/md/bcache/journal.c   |  6 +++---
 drivers/md/bcache/super.c     |  8 ++++++++
 drivers/nvme/host/multipath.c |  6 ++++--
 8 files changed, 25 insertions(+), 19 deletions(-)
```
```diff
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6e..41317c50a446 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
         /*
          * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-         * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-         * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-         * synchronize_rcu to ensure all of the users go out of the critical
-         * section below and see zeroed q_usage_counter.
+         * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+         * to avoid race with it.
          */
-        rcu_read_lock();
-        if (percpu_ref_is_zero(&q->q_usage_counter)) {
-                rcu_read_unlock();
+        if (!percpu_ref_tryget(&q->q_usage_counter))
                 return;
-        }
 
         queue_for_each_hw_ctx(q, hctx, i) {
                 struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                 bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                 bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
         }
-        rcu_read_unlock();
+        blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
```
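The hunk above replaces the RCU read-side critical section around the tag iteration with a reference on q_usage_counter: percpu_ref_tryget() bails out if the queue is already being torn down, and blk_queue_exit() drops the reference afterwards, so the busy_iter_fn callbacks are no longer forbidden from blocking. As a rough illustration only, here is a self-contained userspace C model of that tryget/exit guard built on a plain atomic counter; every name in it is made up and it is not the kernel API.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for q->q_usage_counter (a percpu_ref in the kernel). */
static atomic_int usage = 1;            /* > 0 = queue alive, 0 = frozen/dying */

/* Model of percpu_ref_tryget(): only take a reference while the queue lives. */
static bool queue_enter_tryget(void)
{
	int v = atomic_load(&usage);
	while (v > 0) {
		if (atomic_compare_exchange_weak(&usage, &v, v + 1))
			return true;    /* reference held, iteration may block */
	}
	return false;                   /* queue is going away, bail out */
}

/* Model of blk_queue_exit(): drop the reference taken above. */
static void queue_exit_put(void)
{
	atomic_fetch_sub(&usage, 1);
}

static void busy_tag_iter(void)
{
	if (!queue_enter_tryget())
		return;
	/* ... iterate tags here; callbacks may sleep, unlike under rcu_read_lock() ... */
	printf("iterating with reference held (usage=%d)\n", atomic_load(&usage));
	queue_exit_put();
}

int main(void)
{
	busy_tag_iter();
	return 0;
}
```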
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85a1c1a59c72..e3c39ea8e17b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 BUG_ON(!rq->q);
                 if (rq->mq_ctx != this_ctx) {
                         if (this_ctx) {
-                                trace_block_unplug(this_q, depth, from_schedule);
+                                trace_block_unplug(this_q, depth, !from_schedule);
                                 blk_mq_sched_insert_requests(this_q, this_ctx,
                                                                 &ctx_list,
                                                                 from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
          * on 'ctx_list'. Do those.
          */
         if (this_ctx) {
-                trace_block_unplug(this_q, depth, from_schedule);
+                trace_block_unplug(this_q, depth, !from_schedule);
                 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                                 from_schedule);
         }
```
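The last argument of trace_block_unplug() indicates an explicit unplug; flushing the plug list from the scheduler is precisely the non-explicit case, so the call sites above must pass !from_schedule. A tiny illustrative-only C sketch of that inversion (names are hypothetical, not kernel code):

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Toy classifier mirroring the fixed call sites: blk_mq_flush_plug_list()
 * passes !from_schedule as the "explicit" flag, so a scheduler-driven flush
 * is reported as an async/timer unplug and everything else as explicit I/O.
 */
static const char *unplug_kind(bool from_schedule)
{
	bool explicit_unplug = !from_schedule;  /* the inversion added by the patch */
	return explicit_unplug ? "explicit (I/O) unplug" : "scheduler/timer unplug";
}

int main(void)
{
	printf("from_schedule=false -> %s\n", unplug_kind(false));
	printf("from_schedule=true  -> %s\n", unplug_kind(true));
	return 0;
}
```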
```diff
diff --git a/block/elevator.c b/block/elevator.c
index 6a06b5d040e5..fae58b2f906f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 
         while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                 ;
-        if (q->nr_sorted && printed++ < 10) {
+        if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
                 printk(KERN_ERR "%s: forced dispatching is broken "
                        "(nr_sorted=%u), please report this\n",
                        q->elevator->type->elevator_name, q->nr_sorted);
```
```diff
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900d..429d20131c7e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
                         list_del(&gnt_list_entry->node);
                         gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
                         rinfo->persistent_gnts_c--;
-                        __free_page(gnt_list_entry->page);
-                        kfree(gnt_list_entry);
+                        gnt_list_entry->gref = GRANT_INVALID_REF;
+                        list_add_tail(&gnt_list_entry->node, &rinfo->grants);
                 }
 
                 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
```
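Instead of freeing the purged grant's page and its tracking structure, the corrected code invalidates the grant reference and returns the entry to rinfo->grants for reuse. Below is a minimal userspace C sketch of that recycle-instead-of-free idea using a hypothetical free list; it is an analogy, not the driver's code.

```c
#include <stdio.h>
#include <stdlib.h>

#define INVALID_REF (-1)               /* stand-in for GRANT_INVALID_REF */

/* Hypothetical grant tracking entry; the real one lives in xen-blkfront. */
struct grant_entry {
	int gref;                      /* grant reference, or INVALID_REF */
	struct grant_entry *next;      /* simple free-list link */
};

/* Recycle: invalidate the handle and push the entry back on the free list
 * instead of freeing it, mirroring the behaviour the hunk above restores. */
static void purge_one(struct grant_entry *e, struct grant_entry **free_list)
{
	/* ...end foreign access for e->gref here in the real driver... */
	e->gref = INVALID_REF;
	e->next = *free_list;
	*free_list = e;
}

int main(void)
{
	struct grant_entry *free_list = NULL;
	struct grant_entry *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	e->gref = 42;
	e->next = NULL;
	purge_one(e, &free_list);
	printf("recycled entry gref=%d, free list head=%p\n",
	       free_list->gref, (void *)free_list);
	free(e);
	return 0;
}
```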
```diff
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 83504dd8100a..954dad29e6e8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
 
```
```diff
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6116bbf870d8..522c7426f3a0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 
                 closure_get(&ca->set->cl);
                 INIT_WORK(&ja->discard_work, journal_discard_work);
-                schedule_work(&ja->discard_work);
+                queue_work(bch_journal_wq, &ja->discard_work);
         }
 }
 
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
                                         : &j->w[0];
 
         __closure_wake_up(&w->wait);
-        continue_at_nobarrier(cl, journal_write, system_wq);
+        continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
 static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
                 spin_unlock(&c->journal.lock);
 
                 btree_flush_write(c);
-                continue_at(cl, journal_write, system_wq);
+                continue_at(cl, journal_write, bch_journal_wq);
                 return;
         }
 
```
```diff
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94c756c66bd7..30ba9aeb5ee8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 
 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
                 kobject_put(bcache_kobj);
         if (bcache_wq)
                 destroy_workqueue(bcache_wq);
+        if (bch_journal_wq)
+                destroy_workqueue(bch_journal_wq);
+
         if (bcache_major)
                 unregister_blkdev(bcache_major, "bcache");
         unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
         if (!bcache_wq)
                 goto err;
 
+        bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+        if (!bch_journal_wq)
+                goto err;
+
         bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
         if (!bcache_kobj)
                 goto err;
```
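The bcache deadlock fix moves journal work off system_wq onto a dedicated WQ_MEM_RECLAIM workqueue, allocated in bcache_init() and torn down in bcache_exit(), so journal writes cannot end up queued behind other work on the shared queue. The fragment below is a minimal kernel-module-style sketch of the same pattern, not bcache code; only the workqueue API calls are real, the module and symbol names are made up.

```c
// Minimal sketch of the "dedicated workqueue" pattern used by the bcache fix.
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_journal_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *w)
{
	pr_info("demo: journal-style work ran on its own workqueue\n");
}

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM gives the queue a rescuer thread, so work on the
	 * writeback path is not starved when memory is tight. */
	demo_journal_wq = alloc_workqueue("demo_journal", WQ_MEM_RECLAIM, 0);
	if (!demo_journal_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_journal_wq, &demo_work);   /* instead of schedule_work() */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_journal_wq);        /* drains pending work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```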
```diff
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5a9562881d4e..9fe3fff818b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
         INIT_WORK(&ctrl->ana_work, nvme_ana_work);
         ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-        if (!ctrl->ana_log_buf)
+        if (!ctrl->ana_log_buf) {
+                error = -ENOMEM;
                 goto out;
+        }
 
         error = nvme_read_ana_log(ctrl, true);
         if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 out_free_ana_log_buf:
         kfree(ctrl->ana_log_buf);
 out:
-        return -ENOMEM;
+        return error;
 }
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
```
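With this change nvme_mpath_init() propagates the error that actually occurred: the allocation-failure branch sets error = -ENOMEM explicitly, and the out: label returns error instead of a hard-coded -ENOMEM. Here is a small self-contained userspace C sketch of that goto-based error-propagation pattern, with hypothetical helpers standing in for kmalloc() and nvme_read_ana_log(); it is a model of the pattern, not the NVMe code.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helpers; the real function allocates and reads the ANA log. */
static char *alloc_log(size_t size) { return malloc(size); }
static int read_log(char *buf, size_t size) { memset(buf, 0, size); return 0; }

static int mpath_style_init(size_t log_size)
{
	int error;
	char *log_buf;

	log_buf = alloc_log(log_size);
	if (!log_buf) {
		error = -ENOMEM;        /* set the code before jumping ... */
		goto out;
	}

	error = read_log(log_buf, log_size);
	if (error)
		goto out_free_log_buf;

	free(log_buf);                  /* success path (sketch only) */
	return 0;

out_free_log_buf:
	free(log_buf);
out:
	return error;                   /* ... so "out" returns the real error */
}

int main(void)
{
	printf("init returned %d\n", mpath_style_init(4096));
	return 0;
}
```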
