path: root/block
author     Dan Williams <dan.j.williams@intel.com>    2015-10-21 13:20:12 -0400
committer  Jens Axboe <axboe@fb.com>                  2015-10-21 16:43:41 -0400
commit     3ef28e83ab15799742e55fd13243a5f678b04242
tree       e594552aaeaafed8468ae96e54992cf1697a1fc7
parent     4cfc766e07a5ed709a9d5289c8644fe78e9f24de
block: generic request_queue reference counting
Allow pmem, and other synchronous/bio-based block drivers, to fallback on
a per-cpu reference count managed by the core for tracking queue
live/dead state.

The existing per-cpu reference count for the blk_mq case is promoted to
be used in all block i/o scenarios.  This involves initializing it by
default, waiting for it to drop to zero at exit, and holding a live
reference over the invocation of q->make_request_fn() in
generic_make_request().  The blk_mq code continues to take its own
reference per blk_mq request and retains the ability to freeze the
queue, but the check that the queue is frozen is moved to
generic_make_request().

This fixes crash signatures like the following:

 BUG: unable to handle kernel paging request at ffff880140000000
 [..]
 Call Trace:
  [<ffffffff8145e8bf>] ? copy_user_handle_tail+0x5f/0x70
  [<ffffffffa004e1e0>] pmem_do_bvec.isra.11+0x70/0xf0 [nd_pmem]
  [<ffffffffa004e331>] pmem_make_request+0xd1/0x200 [nd_pmem]
  [<ffffffff811c3162>] ? mempool_alloc+0x72/0x1a0
  [<ffffffff8141f8b6>] generic_make_request+0xd6/0x110
  [<ffffffff8141f966>] submit_bio+0x76/0x170
  [<ffffffff81286dff>] submit_bh_wbc+0x12f/0x160
  [<ffffffff81286e62>] submit_bh+0x12/0x20
  [<ffffffff813395bd>] jbd2_write_superblock+0x8d/0x170
  [<ffffffff8133974d>] jbd2_mark_journal_empty+0x5d/0x90
  [<ffffffff813399cb>] jbd2_journal_destroy+0x24b/0x270
  [<ffffffff810bc4ca>] ? put_pwq_unlocked+0x2a/0x30
  [<ffffffff810bc6f5>] ? destroy_workqueue+0x225/0x250
  [<ffffffff81303494>] ext4_put_super+0x64/0x360
  [<ffffffff8124ab1a>] generic_shutdown_super+0x6a/0xf0

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
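For orientation, here is the lifecycle of the new q_usage_counter condensed from the hunks below; this is a call map only, not a compilable unit:

    blk_alloc_queue_node()
        percpu_ref_init(&q->q_usage_counter, ..., PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)
    blk_register_queue()
        percpu_ref_switch_to_percpu(&q->q_usage_counter)  /* percpu fast path from here on */
    generic_make_request()
        blk_queue_enter(q, __GFP_WAIT)                    /* percpu_ref_tryget_live() */
        q->make_request_fn(q, bio)
        blk_queue_exit(q)                                 /* percpu_ref_put() */
    blk_cleanup_queue()
        blk_freeze_queue(q)                               /* percpu_ref_kill() + wait for zero */
        percpu_ref_exit(&q->q_usage_counter)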
Diffstat (limited to 'block')
 block/blk-core.c     | 71
 block/blk-mq-sysfs.c |  6
 block/blk-mq.c       | 80
 block/blk-sysfs.c    |  3
 block/blk.h          | 14
 5 files changed, 101 insertions(+), 73 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773..9b4d735cb5b8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -554,13 +554,10 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * Drain all requests queued before DYING marking. Set DEAD flag to
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
-	if (q->mq_ops) {
-		blk_mq_freeze_queue(q);
-		spin_lock_irq(lock);
-	} else {
-		spin_lock_irq(lock);
+	blk_freeze_queue(q);
+	spin_lock_irq(lock);
+	if (!q->mq_ops)
 		__blk_drain_queue(q, true);
-	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
@@ -570,6 +567,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
+	percpu_ref_exit(&q->q_usage_counter);
 
 	spin_lock_irq(lock);
 	if (q->queue_lock != &q->__queue_lock)
@@ -629,6 +627,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+	while (true) {
+		int ret;
+
+		if (percpu_ref_tryget_live(&q->q_usage_counter))
+			return 0;
+
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
+		ret = wait_event_interruptible(q->mq_freeze_wq,
+				!atomic_read(&q->mq_freeze_depth) ||
+				blk_queue_dying(q));
+		if (blk_queue_dying(q))
+			return -ENODEV;
+		if (ret)
+			return ret;
+	}
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+	percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+	struct request_queue *q =
+		container_of(ref, struct request_queue, q_usage_counter);
+
+	wake_up_all(&q->mq_freeze_wq);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
@@ -690,11 +722,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
-	if (blkcg_init_queue(q))
+	/*
+	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
+	 * See blk_register_queue() for details.
+	 */
+	if (percpu_ref_init(&q->q_usage_counter,
+				blk_queue_usage_counter_release,
+				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_bdi;
 
+	if (blkcg_init_queue(q))
+		goto fail_ref;
+
 	return q;
 
+fail_ref:
+	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
 	bdi_destroy(&q->backing_dev_info);
 fail_split:
@@ -1966,9 +2009,19 @@ void generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-		q->make_request_fn(q, bio);
+		if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+
+			q->make_request_fn(q, bio);
+
+			blk_queue_exit(q);
 
-		bio = bio_list_pop(current->bio_list);
+			bio = bio_list_pop(current->bio_list);
+		} else {
+			struct bio *bio_next = bio_list_pop(current->bio_list);
+
+			bio_io_error(bio);
+			bio = bio_next;
+		}
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
 }
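A hypothetical caller sketch (not part of this patch) showing how the new enter/exit pair brackets any direct submission against a queue; example_submit is an illustrative name only, the blk_* calls are the ones added above:

	static int example_submit(struct request_queue *q, struct bio *bio)
	{
		int ret = blk_queue_enter(q, __GFP_WAIT);

		if (ret)
			return ret;		/* -ENODEV once the queue is dying, or a wait error */
		q->make_request_fn(q, bio);	/* safe: q_usage_counter is held across the call */
		blk_queue_exit(q);
		return 0;
	}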
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 788fffd9b409..6f57a110289c 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -413,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q)
 		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
 }
 
-/* see blk_register_queue() */
-void blk_mq_finish_init(struct request_queue *q)
-{
-	percpu_ref_switch_to_percpu(&q->mq_usage_counter);
-}
-
 int blk_mq_register_disk(struct gendisk *disk)
 {
 	struct device *dev = disk_to_dev(disk);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d921cd5177f5..6c240712553a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -78,47 +78,13 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
-	while (true) {
-		int ret;
-
-		if (percpu_ref_tryget_live(&q->mq_usage_counter))
-			return 0;
-
-		if (!(gfp & __GFP_WAIT))
-			return -EBUSY;
-
-		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!atomic_read(&q->mq_freeze_depth) ||
-				blk_queue_dying(q));
-		if (blk_queue_dying(q))
-			return -ENODEV;
-		if (ret)
-			return ret;
-	}
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
-	percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
-	struct request_queue *q =
-		container_of(ref, struct request_queue, mq_usage_counter);
-
-	wake_up_all(&q->mq_freeze_wq);
-}
-
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	int freeze_depth;
 
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
-		percpu_ref_kill(&q->mq_usage_counter);
+		percpu_ref_kill(&q->q_usage_counter);
 		blk_mq_run_hw_queues(q, false);
 	}
 }
@@ -126,18 +92,34 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 }
 
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
  */
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
 {
+	/*
+	 * In the !blk_mq case we are only calling this to kill the
+	 * q_usage_counter, otherwise this increases the freeze depth
+	 * and waits for it to return to zero.  For this reason there is
+	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+	 * exported to drivers as the only user for unfreeze is blk_mq.
+	 */
 	blk_mq_freeze_queue_start(q);
 	blk_mq_freeze_queue_wait(q);
 }
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+	/*
+	 * ...just an alias to keep freeze and unfreeze actions balanced
+	 * in the blk_mq_* namespace
+	 */
+	blk_freeze_queue(q);
+}
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -147,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
 	WARN_ON_ONCE(freeze_depth < 0);
 	if (!freeze_depth) {
-		percpu_ref_reinit(&q->mq_usage_counter);
+		percpu_ref_reinit(&q->q_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
 }
@@ -256,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q, gfp);
+	ret = blk_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -279,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	}
 	blk_mq_put_ctx(ctx);
 	if (!rq) {
-		blk_mq_queue_exit(q);
+		blk_queue_exit(q);
 		return ERR_PTR(-EWOULDBLOCK);
 	}
 	return rq;
@@ -298,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-	blk_mq_queue_exit(q);
+	blk_queue_exit(q);
 }
 
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1177,11 +1159,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-		bio_io_error(bio);
-		return NULL;
-	}
-
+	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -2000,14 +1978,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		hctxs[i]->queue_num = i;
 	}
 
-	/*
-	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
-	 * See blk_register_queue() for details.
-	 */
-	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_hctxs;
-
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -2088,8 +2058,6 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 	blk_mq_free_hw_queues(q, set);
-
-	percpu_ref_exit(&q->mq_usage_counter);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
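For reference, the freeze depth keeps nested freeze/unfreeze calls balanced; a typical blk_mq-side caller that needs the queue quiesced while it changes per-queue state would look like this sketch, where reconfigure_queue_state is an illustrative placeholder, not part of this patch:

	blk_mq_freeze_queue(q);		/* first freeze kills q_usage_counter and waits for it to reach zero */
	reconfigure_queue_state(q);	/* illustrative placeholder: no requests are in flight here */
	blk_mq_unfreeze_queue(q);	/* last unfreeze reinits the ref and wakes q->mq_freeze_wq waiters */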
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3e44a9da2a13..61fc2633bbea 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -599,9 +599,8 @@ int blk_register_queue(struct gendisk *disk)
 	 */
 	if (!blk_queue_init_done(q)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+		percpu_ref_switch_to_percpu(&q->q_usage_counter);
 		blk_queue_bypass_end(q);
-		if (q->mq_ops)
-			blk_mq_finish_init(q);
 	}
 
 	ret = blk_trace_init_sysfs(dev);
diff --git a/block/blk.h b/block/blk.h
index 98614ad37c81..5b2cd393afbe 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,6 +72,20 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
 		unsigned int nr_bytes, unsigned int bidi_bytes);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+void blk_queue_exit(struct request_queue *q);
+void blk_freeze_queue(struct request_queue *q);
+
+static inline void blk_queue_enter_live(struct request_queue *q)
+{
+	/*
+	 * Given that running in generic_make_request() context
+	 * guarantees that a live reference against q_usage_counter has
+	 * been established, further references under that same context
+	 * need not check that the queue has been frozen (marked dead).
+	 */
+	percpu_ref_get(&q->q_usage_counter);
+}
 
 void blk_rq_timed_out_timer(unsigned long data);
 unsigned long blk_rq_timeout(unsigned long timeout);
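The inline helper above is only safe when nested inside a reference that has already been validated; the blk_mq submission path in this patch relies on exactly that nesting, condensed here as a call map from the hunks above:

    generic_make_request(bio)
        blk_queue_enter(q, __GFP_WAIT)          /* outer, checked reference */
        q->make_request_fn(q, bio)
            blk_mq_map_request()
                blk_queue_enter_live(q)         /* unchecked extra ref for the request */
        blk_queue_exit(q)                       /* outer reference dropped */
    ...
    __blk_mq_free_request()
        blk_queue_exit(q)                       /* request's reference dropped at completion */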