author    Bart Van Assche <bart.vanassche@sandisk.com>  2017-06-20 14:15:46 -0400
committer Jens Axboe <axboe@kernel.dk>                  2017-06-20 21:27:14 -0400
commit    332ebbf7f9efb31ffc363b99da548963ee3fd66d (patch)
tree      6e14e628aefabbe5331544977f9300c9800c8aa3
parent    2fff8a924d4c614b5a17b2a236a2cf09aa51af5f (diff)
block: Document what queue type each function is intended for
Some functions in block/blk-core.c must only be used on blk-sq queues while
others are safe to use against any queue type. Document which functions are
intended for blk-sq queues and issue a warning if the blk-sq API is misused.
This not only helps block driver authors but will also make it easier to
remove the blk-sq code once that code is declared obsolete.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	block/blk-core.c	33
-rw-r--r--	block/blk.h	2
2 files changed, 35 insertions(+), 0 deletions(-)
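Every addition below follows the same pattern: a legacy-only (blk-sq) entry
point gains a one-time warning that fires if it is ever invoked on a blk-mq
queue, where q->mq_ops is non-NULL. A minimal sketch of that pattern, using a
hypothetical legacy_only_helper() rather than any function from this patch:

#include <linux/blkdev.h>
#include <linux/bug.h>
#include <linux/lockdep.h>

/*
 * Illustrative only: a blk-sq helper guarded the way this patch guards
 * the functions in block/blk-core.c. The caller must hold q->queue_lock;
 * q->mq_ops is set only for blk-mq queues, so the warning fires (once)
 * on misuse of the blk-sq API.
 */
static void legacy_only_helper(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	/* ... legacy, request_fn-based processing ... */
}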
diff --git a/block/blk-core.c b/block/blk-core.c
index 5f87788249ce..2e02314ea331 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -241,6 +241,7 @@ static void blk_delay_work(struct work_struct *work)
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	if (likely(!blk_queue_dead(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work,
@@ -260,6 +261,7 @@ EXPORT_SYMBOL(blk_delay_queue);
 void blk_start_queue_async(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 	blk_run_queue_async(q);
@@ -279,6 +281,7 @@ void blk_start_queue(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
 	WARN_ON(!irqs_disabled());
+	WARN_ON_ONCE(q->mq_ops);
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 	__blk_run_queue(q);
@@ -302,6 +305,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
@@ -356,6 +360,7 @@ EXPORT_SYMBOL(blk_sync_queue);
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	if (unlikely(blk_queue_dead(q)))
 		return;
@@ -383,6 +388,7 @@ EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
 void __blk_run_queue(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -407,6 +413,7 @@ EXPORT_SYMBOL(__blk_run_queue);
 void blk_run_queue_async(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
 		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
@@ -425,6 +432,8 @@ void blk_run_queue(struct request_queue *q)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	spin_lock_irqsave(q->queue_lock, flags);
 	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -453,6 +462,7 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 	int i;
 
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	while (true) {
 		bool drain = false;
@@ -531,6 +541,8 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
+	WARN_ON_ONCE(q->mq_ops);
+
 	spin_lock_irq(q->queue_lock);
 	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
@@ -557,6 +569,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
  * @q: queue of interest
  *
  * Leave bypass mode and restore the normal queueing behavior.
+ *
+ * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
+ * this function is called for both blk-sq and blk-mq queues.
  */
 void blk_queue_bypass_end(struct request_queue *q)
 {
@@ -954,6 +969,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 int blk_init_allocated_queue(struct request_queue *q)
 {
+	WARN_ON_ONCE(q->mq_ops);
+
 	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
 	if (!q->fq)
 		return -ENOMEM;
@@ -1091,6 +1108,8 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
 	struct request_list *rl;
 	int on_thresh, off_thresh;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	spin_lock_irq(q->queue_lock);
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
@@ -1329,6 +1348,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 	struct request *rq;
 
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
@@ -1373,6 +1393,8 @@ static struct request *blk_old_get_request(struct request_queue *q,
 {
 	struct request *rq;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
@@ -1424,6 +1446,7 @@ EXPORT_SYMBOL(blk_get_request);
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
@@ -2495,6 +2518,7 @@ struct request *blk_peek_request(struct request_queue *q)
 	int ret;
 
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	while ((rq = __elv_next_request(q)) != NULL) {
 
@@ -2615,6 +2639,7 @@ void blk_dequeue_request(struct request *rq)
 void blk_start_request(struct request *req)
 {
 	lockdep_assert_held(req->q->queue_lock);
+	WARN_ON_ONCE(req->q->mq_ops);
 
 	blk_dequeue_request(req);
 
@@ -2646,6 +2671,7 @@ struct request *blk_fetch_request(struct request_queue *q)
 	struct request *rq;
 
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	rq = blk_peek_request(q);
 	if (rq)
@@ -2797,6 +2823,7 @@ void blk_finish_request(struct request *req, blk_status_t error)
 	struct request_queue *q = req->q;
 
 	lockdep_assert_held(req->q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	if (req->rq_flags & RQF_STATS)
 		blk_stat_add(req);
@@ -2851,6 +2878,8 @@ static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
 		return true;
 
@@ -2880,6 +2909,7 @@ static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
 			      unsigned int nr_bytes, unsigned int bidi_bytes)
 {
 	lockdep_assert_held(rq->q->queue_lock);
+	WARN_ON_ONCE(rq->q->mq_ops);
 
 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
 		return true;
@@ -2906,6 +2936,7 @@ static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
 bool blk_end_request(struct request *rq, blk_status_t error,
 		     unsigned int nr_bytes)
 {
+	WARN_ON_ONCE(rq->q->mq_ops);
 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(blk_end_request);
@@ -2948,6 +2979,7 @@ bool __blk_end_request(struct request *rq, blk_status_t error,
 		       unsigned int nr_bytes)
 {
 	lockdep_assert_held(rq->q->queue_lock);
+	WARN_ON_ONCE(rq->q->mq_ops);
 
 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
@@ -2967,6 +2999,7 @@ void __blk_end_request_all(struct request *rq, blk_status_t error)
 	unsigned int bidi_bytes = 0;
 
 	lockdep_assert_held(rq->q->queue_lock);
+	WARN_ON_ONCE(rq->q->mq_ops);
 
 	if (unlikely(blk_bidi_rq(rq)))
 		bidi_bytes = blk_rq_bytes(rq->next_rq);
diff --git a/block/blk.h b/block/blk.h
index 83c8e1100525..798691a5e5e9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -143,6 +143,8 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 	struct request *rq;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	while (1) {
 		if (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
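For driver code that may see either queue type, the guard implies a simple
dispatch rule: legacy helpers such as __blk_run_queue() only under queue_lock
on blk-sq queues, and the blk-mq run API otherwise. A sketch of a caller
honoring that split (the wrapper and its name are illustrative, not part of
this patch):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>

/* Illustrative wrapper: run a queue correctly for either queue type. */
static void run_queue_example(struct request_queue *q)
{
	unsigned long flags;

	if (q->mq_ops) {
		/* blk-mq queues use their own run API. */
		blk_mq_run_hw_queues(q, true);
	} else {
		/* Legacy path: queue_lock must be held around __blk_run_queue(). */
		spin_lock_irqsave(q->queue_lock, flags);
		__blk_run_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}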